[perf] Share ResolvedTargetData across worker threads This commit modifies ResolvedTargetData to be thread-safe using a combination of std::shared_mutex for the target-to-info map and per-target mutexes with atomic flags for individual property computations (double-checked locking). By sharing a single ResolvedTargetData instance across all worker threads in command_gen.cc, we eliminate redundant computations of transitive dependencies (libs, hard_deps, etc.) that were previously performed independently by each thread. Hyperfine benchmark results on a large Chromium build graph (gn gen): - main: 3.538 s ± 0.041 s [User: 18.883 s, System: 2.922 s] - shared: 2.894 s ± 0.041 s [User: 13.647 s, System: 3.072 s] - Improvement: ~18% reduction in wall-clock time (a 1.22x speedup), ~28% reduction in user CPU time. Bug: 484863025 Change-Id: Ib9c31feee6a118423b1bb6cf4e5552be660cb7c4 Reviewed-on: https://gn-review.googlesource.com/c/gn/+/21000 Commit-Queue: Takuto Ikuta <tikuta@google.com> Reviewed-by: David Turner <digit@google.com>
diff --git a/src/gn/command_gen.cc b/src/gn/command_gen.cc index 75004e5..8c2a45a 100644 --- a/src/gn/command_gen.cc +++ b/src/gn/command_gen.cc
@@ -91,23 +91,19 @@ NinjaOutputsMap ninja_outputs_map; - using ResolvedMap = std::unordered_map<std::thread::id, ResolvedTargetData>; - std::unique_ptr<ResolvedMap> resolved_map = std::make_unique<ResolvedMap>(); + std::unique_ptr<ResolvedTargetData> resolved = + std::make_unique<ResolvedTargetData>(); - void LeakOnPurpose() { (void)resolved_map.release(); } + void LeakOnPurpose() { (void)resolved.release(); } }; // Called on worker thread to write the ninja file. void BackgroundDoWrite(TargetWriteInfo* write_info, const Target* target) { - ResolvedTargetData* resolved; + ResolvedTargetData* resolved = write_info->resolved.get(); std::vector<OutputFile> target_ninja_outputs; std::vector<OutputFile>* ninja_outputs = write_info->want_ninja_outputs ? &target_ninja_outputs : nullptr; - { - std::lock_guard<std::mutex> lock(write_info->lock); - resolved = &((*write_info->resolved_map)[std::this_thread::get_id()]); - } std::string rule = NinjaTargetWriter::RunAndWriteFile(target, resolved, ninja_outputs);
diff --git a/src/gn/resolved_target_data.cc b/src/gn/resolved_target_data.cc index efe65ce..86ab233 100644 --- a/src/gn/resolved_target_data.cc +++ b/src/gn/resolved_target_data.cc
@@ -8,6 +8,15 @@ ResolvedTargetData::TargetInfo* ResolvedTargetData::GetTargetInfo( const Target* target) const { + { + std::shared_lock<std::shared_mutex> lock(map_mutex_); + size_t index = targets_.IndexOf(target); + if (index != UniqueVector<const Target*>::kIndexNone) { + return infos_[index].get(); + } + } + + std::unique_lock<std::shared_mutex> lock(map_mutex_); auto ret = targets_.PushBackWithIndex(target); if (ret.first) { infos_.push_back(std::make_unique<TargetInfo>(target));
diff --git a/src/gn/resolved_target_data.h b/src/gn/resolved_target_data.h index febbf39..d95459d 100644 --- a/src/gn/resolved_target_data.h +++ b/src/gn/resolved_target_data.h
@@ -5,7 +5,10 @@ #ifndef TOOLS_GN_RESOLVED_TARGET_DATA_H_ #define TOOLS_GN_RESOLVED_TARGET_DATA_H_ +#include <atomic> #include <memory> +#include <mutex> +#include <shared_mutex> #include <vector> #include "base/containers/span.h" @@ -155,14 +158,15 @@ const Target* target = nullptr; ResolvedTargetDeps deps; + mutable std::mutex mutex; - bool has_lib_info = false; - bool has_framework_info = false; - bool has_hard_deps = false; - bool has_inherited_libs = false; - bool has_module_deps_information = false; - bool has_rust_libs = false; - bool has_swift_values = false; + std::atomic<bool> has_lib_info = false; + std::atomic<bool> has_framework_info = false; + std::atomic<bool> has_hard_deps = false; + std::atomic<bool> has_inherited_libs = false; + std::atomic<bool> has_module_deps_information = false; + std::atomic<bool> has_rust_libs = false; + std::atomic<bool> has_swift_values = false; // Only valid if |has_lib_info| is true. std::vector<SourceDir> lib_dirs; @@ -209,63 +213,84 @@ const TargetInfo* GetTargetLibInfo(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_lib_info) { - ComputeLibInfo(info); - DCHECK(info->has_lib_info); + if (!info->has_lib_info.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_lib_info.load(std::memory_order_relaxed)) { + ComputeLibInfo(info); + info->has_lib_info.store(true, std::memory_order_release); + } } return info; } const TargetInfo* GetTargetFrameworkInfo(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_framework_info) { - ComputeFrameworkInfo(info); - DCHECK(info->has_framework_info); + if (!info->has_framework_info.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_framework_info.load(std::memory_order_relaxed)) { + ComputeFrameworkInfo(info); + info->has_framework_info.store(true, std::memory_order_release); + } } return info; } const TargetInfo* 
GetTargetHardDeps(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_hard_deps) { - ComputeHardDeps(info); - DCHECK(info->has_hard_deps); + if (!info->has_hard_deps.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_hard_deps.load(std::memory_order_relaxed)) { + ComputeHardDeps(info); + info->has_hard_deps.store(true, std::memory_order_release); + } } return info; } const TargetInfo* GetTargetInheritedLibs(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_inherited_libs) { - ComputeInheritedLibs(info); - DCHECK(info->has_inherited_libs); + if (!info->has_inherited_libs.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_inherited_libs.load(std::memory_order_relaxed)) { + ComputeInheritedLibs(info); + info->has_inherited_libs.store(true, std::memory_order_release); + } } return info; } const TargetInfo* GetTargetModuleDepsInformation(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_module_deps_information) { - ComputeModuleDepsInformation(info); - DCHECK(info->has_module_deps_information); + if (!info->has_module_deps_information.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_module_deps_information.load(std::memory_order_relaxed)) { + ComputeModuleDepsInformation(info); + info->has_module_deps_information.store(true, std::memory_order_release); + } } return info; } const TargetInfo* GetTargetRustLibs(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_rust_libs) { - ComputeRustLibs(info); - DCHECK(info->has_rust_libs); + if (!info->has_rust_libs.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_rust_libs.load(std::memory_order_relaxed)) { + ComputeRustLibs(info); + info->has_rust_libs.store(true, 
std::memory_order_release); + } } return info; } const TargetInfo* GetTargetSwiftValues(const Target* target) const { TargetInfo* info = GetTargetInfo(target); - if (!info->has_swift_values) { - ComputeSwiftValues(info); - DCHECK(info->has_swift_values); + if (!info->has_swift_values.load(std::memory_order_acquire)) { + std::lock_guard<std::mutex> lock(info->mutex); + if (!info->has_swift_values.load(std::memory_order_relaxed)) { + ComputeSwiftValues(info); + info->has_swift_values.store(true, std::memory_order_release); + } } return info; } @@ -307,6 +332,7 @@ // on demand (hence the mutable qualifier). Implemented with a // UniqueVector<> and a parallel vector of unique TargetInfo // instances for best performance. + mutable std::shared_mutex map_mutex_; mutable UniqueVector<const Target*> targets_; mutable std::vector<std::unique_ptr<TargetInfo>> infos_; };