// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/cpufreq_monitor_android.h"

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>
#include <cstring>

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/trace_event.h"
22 
23 namespace base {
24 
25 namespace trace_event {
26 
27 namespace {
28 
29 const size_t kNumBytesToReadForSampling = 32;
30 constexpr const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("power");
31 const char kEventTitle[] = "CPU Frequency";
32 
33 }  // namespace
34 
CPUFreqMonitorDelegate()35 CPUFreqMonitorDelegate::CPUFreqMonitorDelegate() {}
36 
GetScalingCurFreqPathString(unsigned int cpu_id) const37 std::string CPUFreqMonitorDelegate::GetScalingCurFreqPathString(
38     unsigned int cpu_id) const {
39   return base::StringPrintf(
40       "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq", cpu_id);
41 }
42 
IsTraceCategoryEnabled() const43 bool CPUFreqMonitorDelegate::IsTraceCategoryEnabled() const {
44   bool enabled;
45   TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
46   return enabled;
47 }
48 
GetKernelMaxCPUs() const49 unsigned int CPUFreqMonitorDelegate::GetKernelMaxCPUs() const {
50   std::string str;
51   if (!base::ReadFileToString(
52           base::FilePath("/sys/devices/system/cpu/kernel_max"), &str)) {
53     // If we fail to read the kernel_max file, we just assume that CPU0 exists.
54     return 0;
55   }
56 
57   unsigned int kernel_max_cpu = 0;
58   base::StringToUint(str, &kernel_max_cpu);
59   return kernel_max_cpu;
60 }
61 
GetRelatedCPUsPathString(unsigned int cpu_id) const62 std::string CPUFreqMonitorDelegate::GetRelatedCPUsPathString(
63     unsigned int cpu_id) const {
64   return base::StringPrintf(
65       "/sys/devices/system/cpu/cpu%d/cpufreq/related_cpus", cpu_id);
66 }
67 
GetCPUIds(std::vector<unsigned int> * ids) const68 void CPUFreqMonitorDelegate::GetCPUIds(std::vector<unsigned int>* ids) const {
69   ids->clear();
70   unsigned int kernel_max_cpu = GetKernelMaxCPUs();
71   // CPUs related to one that's already marked for monitoring get set to "false"
72   // so we don't needlessly monitor CPUs with redundant frequency information.
73   char cpus_to_monitor[kernel_max_cpu + 1];
74   std::memset(cpus_to_monitor, 1, kernel_max_cpu + 1);
75 
76   // Rule out the related CPUs for each one so we only end up with the CPUs
77   // that are representative of the cluster.
78   for (unsigned int i = 0; i <= kernel_max_cpu; i++) {
79     if (!cpus_to_monitor[i])
80       continue;
81 
82     std::string filename = GetRelatedCPUsPathString(i);
83     std::string line;
84     if (!base::ReadFileToString(base::FilePath(filename), &line))
85       continue;
86     // When reading the related_cpus file, we expected the format to be
87     // something like "0 1 2 3" for CPU0-3 if they're all in one cluster.
88     for (auto& str_piece :
89          base::SplitString(line, " ", base::WhitespaceHandling::TRIM_WHITESPACE,
90                            base::SplitResult::SPLIT_WANT_NONEMPTY)) {
91       unsigned int cpu_id;
92       if (base::StringToUint(str_piece, &cpu_id)) {
93         if (cpu_id != i && cpu_id >= 0 && cpu_id <= kernel_max_cpu)
94           cpus_to_monitor[cpu_id] = 0;
95       }
96     }
97     ids->push_back(i);
98   }
99 
100   // If none of the files were readable, we assume CPU0 exists and fall back to
101   // using that.
102   if (ids->size() == 0)
103     ids->push_back(0);
104 }
105 
RecordFrequency(unsigned int cpu_id,unsigned int freq)106 void CPUFreqMonitorDelegate::RecordFrequency(unsigned int cpu_id,
107                                              unsigned int freq) {
108   TRACE_COUNTER_ID1(kTraceCategory, kEventTitle, cpu_id, freq);
109 }
110 
111 scoped_refptr<SingleThreadTaskRunner>
CreateTaskRunner()112 CPUFreqMonitorDelegate::CreateTaskRunner() {
113   return base::ThreadPool::CreateSingleThreadTaskRunner(
114       {base::MayBlock(), base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
115        base::TaskPriority::BEST_EFFORT},
116       base::SingleThreadTaskRunnerThreadMode::SHARED);
117 }
118 
CPUFreqMonitor()119 CPUFreqMonitor::CPUFreqMonitor()
120     : CPUFreqMonitor(std::make_unique<CPUFreqMonitorDelegate>()) {}
121 
CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate)122 CPUFreqMonitor::CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate)
123     : delegate_(std::move(delegate)) {}
124 
~CPUFreqMonitor()125 CPUFreqMonitor::~CPUFreqMonitor() {
126   Stop();
127 }
128 
129 // static
GetInstance()130 CPUFreqMonitor* CPUFreqMonitor::GetInstance() {
131   static base::NoDestructor<CPUFreqMonitor> instance;
132   return instance.get();
133 }
134 
OnTraceLogEnabled()135 void CPUFreqMonitor::OnTraceLogEnabled() {
136   GetOrCreateTaskRunner()->PostTask(
137       FROM_HERE,
138       base::BindOnce(&CPUFreqMonitor::Start, weak_ptr_factory_.GetWeakPtr()));
139 }
140 
OnTraceLogDisabled()141 void CPUFreqMonitor::OnTraceLogDisabled() {
142   Stop();
143 }
144 
Start()145 void CPUFreqMonitor::Start() {
146   // It's the responsibility of the caller to ensure that Start/Stop are
147   // synchronized. If Start/Stop are called asynchronously where this value
148   // may be incorrect, we have bigger problems.
149   if (base::subtle::NoBarrier_Load(&is_enabled_) == 1 ||
150       !delegate_->IsTraceCategoryEnabled()) {
151     return;
152   }
153 
154   std::vector<unsigned int> cpu_ids;
155   delegate_->GetCPUIds(&cpu_ids);
156 
157   std::vector<std::pair<unsigned int, base::ScopedFD>> fds;
158   for (unsigned int id : cpu_ids) {
159     std::string fstr = delegate_->GetScalingCurFreqPathString(id);
160     int fd = open(fstr.c_str(), O_RDONLY);
161     if (fd == -1)
162       continue;
163 
164     fds.emplace_back(std::make_pair(id, base::ScopedFD(fd)));
165   }
166   // We failed to read any scaling_cur_freq files, no point sampling nothing.
167   if (fds.size() == 0)
168     return;
169 
170   base::subtle::Release_Store(&is_enabled_, 1);
171 
172   GetOrCreateTaskRunner()->PostTask(
173       FROM_HERE,
174       base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
175                      std::move(fds)));
176 }
177 
Stop()178 void CPUFreqMonitor::Stop() {
179   base::subtle::Release_Store(&is_enabled_, 0);
180 }
181 
Sample(std::vector<std::pair<unsigned int,base::ScopedFD>> fds)182 void CPUFreqMonitor::Sample(
183     std::vector<std::pair<unsigned int, base::ScopedFD>> fds) {
184   // For the same reason as above we use NoBarrier_Load, because if this value
185   // is in transition and we use Acquire_Load then we'll never shut down our
186   // original Sample tasks until the next Stop, so it's still the responsibility
187   // of callers to sync Start/Stop.
188   if (base::subtle::NoBarrier_Load(&is_enabled_) == 0)
189     return;
190 
191   for (auto& id_fd : fds) {
192     int fd = id_fd.second.get();
193     unsigned int freq = 0;
194     // If we have trouble reading data from the file for any reason we'll end up
195     // reporting the frequency as nothing.
196     lseek(fd, 0L, SEEK_SET);
197     char data[kNumBytesToReadForSampling];
198 
199     size_t bytes_read = read(fd, data, kNumBytesToReadForSampling);
200     if (bytes_read > 0) {
201       if (bytes_read < kNumBytesToReadForSampling)
202         data[bytes_read] = '\0';
203       int ret = sscanf(data, "%d", &freq);
204       if (ret == 0 || ret == std::char_traits<char>::eof())
205         freq = 0;
206     }
207 
208     delegate_->RecordFrequency(id_fd.first, freq);
209   }
210 
211   GetOrCreateTaskRunner()->PostDelayedTask(
212       FROM_HERE,
213       base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
214                      std::move(fds)),
215       base::TimeDelta::FromMilliseconds(kDefaultCPUFreqSampleIntervalMs));
216 }
217 
IsEnabledForTesting()218 bool CPUFreqMonitor::IsEnabledForTesting() {
219   return base::subtle::Acquire_Load(&is_enabled_) == 1;
220 }
221 
222 const scoped_refptr<SingleThreadTaskRunner>&
GetOrCreateTaskRunner()223 CPUFreqMonitor::GetOrCreateTaskRunner() {
224   if (!task_runner_)
225     task_runner_ = delegate_->CreateTaskRunner();
226   return task_runner_;
227 }
228 
229 }  // namespace trace_event
230 }  // namespace base
231