/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RecordReadThread.h"

#include <sys/resource.h>
#include <unistd.h>

#include <algorithm>
#include <unordered_map>

#include "environment.h"
#include "event_type.h"
#include "record.h"
#include "utils.h"

namespace simpleperf {

static constexpr size_t kDefaultLowBufferLevel = 10 * 1024 * 1024u;
static constexpr size_t kDefaultCriticalBufferLevel = 5 * 1024 * 1024u;

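// RecordBuffer is a ring buffer with a single writer (the read thread) and a single reader (the
// main thread). read_head_ and write_head_ are atomics, so no lock is needed as long as that
// single-producer/single-consumer pattern holds.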
RecordBuffer::RecordBuffer(size_t buffer_size)
    : read_head_(0), write_head_(0), buffer_size_(buffer_size), buffer_(new char[buffer_size]) {
}

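// One byte before read_head_ is always left unused, so a full buffer (write_head_ == write_tail)
// can be told apart from an empty one (write_head_ == read_head_). For example, with
// buffer_size_ = 16, read_head_ = 4 and write_head_ = 10, the free space is the 9 bytes in
// [10, 16) and [0, 3).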
size_t RecordBuffer::GetFreeSize() const {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  if (write_head <= write_tail) {
    return write_tail - write_head;
  }
  return buffer_size_ - write_head + write_tail;
}

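// Allocate a contiguous chunk for one record; records never straddle the end of the buffer. If
// the space at the tail is too small, the writer wraps to offset 0 and adds the skipped tail
// bytes to cur_write_record_size_, so FinishWrite() advances write_head_ past them.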
char* RecordBuffer::AllocWriteSpace(size_t record_size) {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  size_t read_head = read_head_.load(std::memory_order_acquire);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  cur_write_record_size_ = record_size;
  if (write_head < write_tail) {
    if (write_head + record_size > write_tail) {
      return nullptr;
    }
  } else if (write_head + record_size > buffer_size_) {
    // Not enough space at the end of the buffer, need to wrap to the start of the buffer.
    if (write_tail < record_size) {
      return nullptr;
    }
    if (buffer_size_ - write_head >= sizeof(perf_event_header)) {
      // Set the size field in perf_event_header to 0, so that GetCurrentRecord() knows to wrap
      // to the start of the buffer when it sees size == 0.
      memset(buffer_.get() + write_head, 0, sizeof(perf_event_header));
    }
    cur_write_record_size_ += buffer_size_ - write_head;
    write_head = 0;
  }
  return buffer_.get() + write_head;
}

void RecordBuffer::FinishWrite() {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  write_head = (write_head + cur_write_record_size_) % buffer_size_;
  write_head_.store(write_head, std::memory_order_release);
}

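// Return the record at read_head_, or nullptr if the buffer is empty. A perf_event_header with
// size == 0 at the tail of the buffer (written by AllocWriteSpace()) marks a wrap; the reader
// then skips the tail bytes and continues from offset 0.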
char* RecordBuffer::GetCurrentRecord() {
  size_t write_head = write_head_.load(std::memory_order_acquire);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  if (read_head == write_head) {
    return nullptr;
  }
  perf_event_header header;
  if (read_head > write_head) {
    if (buffer_size_ - read_head < sizeof(header) ||
        (memcpy(&header, buffer_.get() + read_head, sizeof(header)) && header.size == 0)) {
      // Need to wrap to the start of the buffer.
      cur_read_record_size_ += buffer_size_ - read_head;
      read_head = 0;
      memcpy(&header, buffer_.get(), sizeof(header));
    }
  } else {
    memcpy(&header, buffer_.get() + read_head, sizeof(header));
  }
  cur_read_record_size_ += header.size;
  return buffer_.get() + read_head;
}

void RecordBuffer::MoveToNextRecord() {
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  read_head = (read_head + cur_read_record_size_) % buffer_size_;
  read_head_.store(read_head, std::memory_order_release);
  cur_read_record_size_ = 0;
}

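// RecordParser precomputes field offsets from attr.sample_type, so the read thread can locate
// the time and stack size fields in raw records without fully parsing them. All fields before
// the ones we care about are fixed-size u64 values, so their total size is the number of set
// bits in the corresponding sample_type mask times sizeof(uint64_t).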
RecordParser::RecordParser(const perf_event_attr& attr)
    : sample_type_(attr.sample_type),
      sample_regs_count_(__builtin_popcountll(attr.sample_regs_user)) {
  size_t pos = sizeof(perf_event_header);
  uint64_t mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | PERF_SAMPLE_TID;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  if (sample_type_ & PERF_SAMPLE_TIME) {
    time_pos_in_sample_records_ = pos;
    pos += sizeof(uint64_t);
  }
  mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU |
         PERF_SAMPLE_PERIOD;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  callchain_pos_in_sample_records_ = pos;
  if ((sample_type_ & PERF_SAMPLE_TIME) && attr.sample_id_all) {
    mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_ID;
    time_rpos_in_non_sample_records_ =
        (__builtin_popcountll(sample_type_ & mask) + 1) * sizeof(uint64_t);
  }
}

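// With attr.sample_id_all set, non-sample records carry the shared sample fields at their tail,
// so the time is at a fixed offset from the end of the record rather than from its head.
// time_rpos_in_non_sample_records_ stores that reverse offset.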
size_t RecordParser::GetTimePos(const perf_event_header& header) const {
  if (header.type == PERF_RECORD_SAMPLE) {
    return time_pos_in_sample_records_;
  }
  if (time_rpos_in_non_sample_records_ != 0u &&
      time_rpos_in_non_sample_records_ < header.size - sizeof(perf_event_header)) {
    return header.size - time_rpos_in_non_sample_records_;
  }
  return 0;
}

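// The callchain, raw data, branch stack and user regs fields are all variable-length, so
// locating the stack size field requires reading each length header in turn. read_record_fn
// abstracts where the record bytes live (e.g. a possibly wrapped kernel buffer).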
size_t RecordParser::GetStackSizePos(
    const std::function<void(size_t, size_t, void*)>& read_record_fn) const {
  size_t pos = callchain_pos_in_sample_records_;
  if (sample_type_ & PERF_SAMPLE_CALLCHAIN) {
    uint64_t ip_nr;
    read_record_fn(pos, sizeof(ip_nr), &ip_nr);
    pos += (ip_nr + 1) * sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_RAW) {
    uint32_t size;
    read_record_fn(pos, sizeof(size), &size);
    pos += size + sizeof(uint32_t);
  }
  if (sample_type_ & PERF_SAMPLE_BRANCH_STACK) {
    uint64_t stack_nr;
    read_record_fn(pos, sizeof(stack_nr), &stack_nr);
    pos += sizeof(uint64_t) + stack_nr * sizeof(BranchStackItemType);
  }
  if (sample_type_ & PERF_SAMPLE_REGS_USER) {
    uint64_t abi;
    read_record_fn(pos, sizeof(abi), &abi);
    pos += (1 + (abi == 0 ? 0 : sample_regs_count_)) * sizeof(uint64_t);
  }
  return (sample_type_ & PERF_SAMPLE_STACK_USER) ? pos : 0;
}

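// KernelRecordReader reads records from one kernel mmap buffer. The kernel requires the mapped
// data area to be a power-of-two number of pages, so positions can wrap with a simple mask
// instead of a modulo.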
KernelRecordReader::KernelRecordReader(EventFd* event_fd) : event_fd_(event_fd) {
  size_t buffer_size;
  buffer_ = event_fd_->GetMappedBuffer(buffer_size);
  buffer_mask_ = buffer_size - 1;
}

bool KernelRecordReader::GetDataFromKernelBuffer() {
  data_size_ = event_fd_->GetAvailableMmapDataSize(data_pos_);
  if (data_size_ == 0) {
    return false;
  }
  init_data_size_ = data_size_;
  record_header_.size = 0;
  return true;
}

void KernelRecordReader::ReadRecord(size_t pos, size_t size, void* dest) {
  pos = (pos + data_pos_) & buffer_mask_;
  size_t copy_size = std::min(size, buffer_mask_ + 1 - pos);
  memcpy(dest, buffer_ + pos, copy_size);
  if (copy_size < size) {
    memcpy(static_cast<char*>(dest) + copy_size, buffer_, size - copy_size);
  }
}

bool KernelRecordReader::MoveToNextRecord(const RecordParser& parser) {
  data_pos_ = (data_pos_ + record_header_.size) & buffer_mask_;
  data_size_ -= record_header_.size;
  if (data_size_ == 0) {
    event_fd_->DiscardMmapData(init_data_size_);
    init_data_size_ = 0;
    return false;
  }
  ReadRecord(0, sizeof(record_header_), &record_header_);
  size_t time_pos = parser.GetTimePos(record_header_);
  if (time_pos != 0) {
    ReadRecord(time_pos, sizeof(record_time_), &record_time_);
  }
  return true;
}

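// The record buffer has two free-space thresholds: below record_buffer_low_level_, stack data
// in sample records is cut to save space; below record_buffer_critical_level_, sample records
// are dropped entirely. When cutting samples is not allowed, the two levels coincide, so
// samples go straight from intact to dropped.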
RecordReadThread::RecordReadThread(size_t record_buffer_size, const perf_event_attr& attr,
                                   size_t min_mmap_pages, size_t max_mmap_pages,
                                   size_t aux_buffer_size, bool allow_cutting_samples)
    : record_buffer_(record_buffer_size),
      record_parser_(attr),
      attr_(attr),
      min_mmap_pages_(min_mmap_pages),
      max_mmap_pages_(max_mmap_pages),
      aux_buffer_size_(aux_buffer_size) {
  if (attr.sample_type & PERF_SAMPLE_STACK_USER) {
    stack_size_in_sample_record_ = attr.sample_stack_user;
  }
  record_buffer_low_level_ = std::min(record_buffer_size / 4, kDefaultLowBufferLevel);
  record_buffer_critical_level_ = std::min(record_buffer_size / 6, kDefaultCriticalBufferLevel);
  if (!allow_cutting_samples) {
    record_buffer_low_level_ = record_buffer_critical_level_;
  }
}

RecordReadThread::~RecordReadThread() {
  if (read_thread_) {
    StopReadThread();
  }
}

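// Two pipes connect the main thread and the read thread: the cmd pipe wakes the read thread up
// to handle a command, and the data pipe wakes the main thread's IOEventLoop up when new
// records arrive in the record buffer.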
bool RecordReadThread::RegisterDataCallback(IOEventLoop& loop,
                                            const std::function<bool()>& data_callback) {
  int cmd_fd[2];
  int data_fd[2];
  if (pipe2(cmd_fd, O_CLOEXEC) != 0 || pipe2(data_fd, O_CLOEXEC) != 0) {
    PLOG(ERROR) << "pipe2";
    return false;
  }
  read_cmd_fd_.reset(cmd_fd[0]);
  write_cmd_fd_.reset(cmd_fd[1]);
  cmd_ = NO_CMD;
  read_data_fd_.reset(data_fd[0]);
  write_data_fd_.reset(data_fd[1]);
  has_data_notification_ = false;
  if (!loop.AddReadEvent(read_data_fd_, data_callback)) {
    return false;
  }
  read_thread_.reset(new std::thread([&]() { RunReadThread(); }));
  return true;
}

bool RecordReadThread::AddEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_ADD_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::RemoveEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_REMOVE_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::SyncKernelBuffer() {
  return SendCmdToReadThread(CMD_SYNC_KERNEL_BUFFER, nullptr);
}

bool RecordReadThread::StopReadThread() {
  bool result = SendCmdToReadThread(CMD_STOP_THREAD, nullptr);
  if (result) {
    read_thread_->join();
    read_thread_ = nullptr;
  }
  return result;
}

bool RecordReadThread::SendCmdToReadThread(Cmd cmd, void* cmd_arg) {
  {
    std::lock_guard<std::mutex> lock(cmd_mutex_);
    cmd_ = cmd;
    cmd_arg_ = cmd_arg;
  }
  char dummy = 0;
  if (TEMP_FAILURE_RETRY(write(write_cmd_fd_, &dummy, 1)) != 1) {
    return false;
  }
  std::unique_lock<std::mutex> lock(cmd_mutex_);
  while (cmd_ != NO_CMD) {
    cmd_finish_cond_.wait(lock);
  }
  return cmd_result_;
}

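// Called on the main thread. The record returned by the previous GetRecord() call stays valid
// until this is called again, since MoveToNextRecord() only now releases its space. When the
// buffer is drained, the pending data notification is consumed so the read thread can post a
// new one.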
std::unique_ptr<Record> RecordReadThread::GetRecord() {
  record_buffer_.MoveToNextRecord();
  char* p = record_buffer_.GetCurrentRecord();
  if (p != nullptr) {
    std::unique_ptr<Record> r = ReadRecordFromBuffer(attr_, p);
    if (r->type() == PERF_RECORD_AUXTRACE) {
      auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
      record_buffer_.AddCurrentRecordSize(auxtrace->data->aux_size);
      auxtrace->location.addr = r->Binary() + r->size();
    }
    return r;
  }
  if (has_data_notification_) {
    char dummy;
    TEMP_FAILURE_RETRY(read(read_data_fd_, &dummy, 1));
    has_data_notification_ = false;
  }
  return nullptr;
}

void RecordReadThread::RunReadThread() {
  IncreaseThreadPriority();
  IOEventLoop loop;
  CHECK(loop.AddReadEvent(read_cmd_fd_, [&]() { return HandleCmd(loop); }));
  loop.RunLoop();
}

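// An RLIMIT_NICE soft limit of 40 allows raising priority up to nice value 20 - 40 = -20, the
// highest. Only in that case is calling setpriority() worthwhile.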
void RecordReadThread::IncreaseThreadPriority() {
  // TODO: use real time priority for root.
  rlimit rlim;
  int result = getrlimit(RLIMIT_NICE, &rlim);
  if (result == 0 && rlim.rlim_cur == 40) {
    result = setpriority(PRIO_PROCESS, gettid(), -20);
    if (result == 0) {
      LOG(VERBOSE) << "Priority of record read thread is increased";
    }
  }
}

RecordReadThread::Cmd RecordReadThread::GetCmd() {
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  return cmd_;
}

bool RecordReadThread::HandleCmd(IOEventLoop& loop) {
  char dummy;
  TEMP_FAILURE_RETRY(read(read_cmd_fd_, &dummy, 1));
  bool result = true;
  switch (GetCmd()) {
    case CMD_ADD_EVENT_FDS:
      result = HandleAddEventFds(loop, *static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_REMOVE_EVENT_FDS:
      result = HandleRemoveEventFds(*static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_SYNC_KERNEL_BUFFER:
      result = ReadRecordsFromKernelBuffer();
      break;
    case CMD_STOP_THREAD:
      result = loop.ExitLoop();
      break;
    default:
      LOG(ERROR) << "Unknown cmd: " << GetCmd();
      result = false;
      break;
  }
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  cmd_ = NO_CMD;
  cmd_result_ = result;
  cmd_finish_cond_.notify_one();
  return true;
}

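// Map kernel buffers starting from max_mmap_pages_, halving the size after each failure until
// min_mmap_pages_ is reached. Event fds on the same cpu share one mapped buffer. mmap errors
// are only reported on the last (smallest) attempt, because earlier failures are retried.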
bool RecordReadThread::HandleAddEventFds(IOEventLoop& loop,
                                         const std::vector<EventFd*>& event_fds) {
  std::unordered_map<int, EventFd*> cpu_map;
  for (size_t pages = max_mmap_pages_; pages >= min_mmap_pages_; pages >>= 1) {
    bool success = true;
    bool report_error = pages == min_mmap_pages_;
    for (EventFd* fd : event_fds) {
      auto it = cpu_map.find(fd->Cpu());
      if (it == cpu_map.end()) {
        if (!fd->CreateMappedBuffer(pages, report_error)) {
          success = false;
          break;
        }
        if (IsEtmEventType(fd->attr().type)) {
          if (!fd->CreateAuxBuffer(aux_buffer_size_, report_error)) {
            fd->DestroyMappedBuffer();
            success = false;
            break;
          }
        }
        cpu_map[fd->Cpu()] = fd;
      } else {
        if (!fd->ShareMappedBuffer(*(it->second), pages == min_mmap_pages_)) {
          success = false;
          break;
        }
      }
    }
    if (success) {
      LOG(VERBOSE) << "Each kernel buffer is " << pages << " pages.";
      break;
    }
    for (auto& pair : cpu_map) {
      pair.second->DestroyMappedBuffer();
      pair.second->DestroyAuxBuffer();
    }
    cpu_map.clear();
  }
  if (cpu_map.empty()) {
    return false;
  }
  for (auto& pair : cpu_map) {
    if (!pair.second->StartPolling(loop, [this]() { return ReadRecordsFromKernelBuffer(); })) {
      return false;
    }
    kernel_record_readers_.emplace_back(pair.second);
  }
  return true;
}

bool RecordReadThread::HandleRemoveEventFds(const std::vector<EventFd*>& event_fds) {
  for (auto& event_fd : event_fds) {
    if (event_fd->HasMappedBuffer()) {
      auto it = std::find_if(kernel_record_readers_.begin(), kernel_record_readers_.end(),
                             [&](const KernelRecordReader& reader) {
                               return reader.GetEventFd() == event_fd;
                             });
      if (it != kernel_record_readers_.end()) {
        kernel_record_readers_.erase(it);
        event_fd->StopPolling();
        event_fd->DestroyMappedBuffer();
        event_fd->DestroyAuxBuffer();
      }
    }
  }
  return true;
}

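// std::make_heap and friends build a max-heap, so comparing with operator> yields a min-heap on
// record time: the earliest record is always at the front.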
static bool CompareRecordTime(KernelRecordReader* r1, KernelRecordReader* r2) {
  return r1->RecordTime() > r2->RecordTime();
}

// When reading from mmap buffers, we prefer reading from all buffers at once rather than reading
// one buffer at a time. By reading all buffers at once, we can merge records from different
// buffers easily in memory. Otherwise, we would have to sort records with greater effort.
bool RecordReadThread::ReadRecordsFromKernelBuffer() {
  do {
    std::vector<KernelRecordReader*> readers;
    for (auto& reader : kernel_record_readers_) {
      if (reader.GetDataFromKernelBuffer()) {
        readers.push_back(&reader);
      }
    }
    bool has_data = false;
    if (!readers.empty()) {
      has_data = true;
      if (readers.size() == 1u) {
        // Only one buffer has data, process it directly.
        while (readers[0]->MoveToNextRecord(record_parser_)) {
          PushRecordToRecordBuffer(readers[0]);
        }
      } else {
        // Use a binary heap to merge records from different buffers. As records from the same
        // buffer are already ordered by time, we only need to merge the first record from each
        // buffer. Each time a record is popped from the heap, the next record from its buffer
        // is pushed in.
        for (auto& reader : readers) {
          reader->MoveToNextRecord(record_parser_);
        }
        std::make_heap(readers.begin(), readers.end(), CompareRecordTime);
        size_t size = readers.size();
        while (size > 0) {
          std::pop_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          PushRecordToRecordBuffer(readers[size - 1]);
          if (readers[size - 1]->MoveToNextRecord(record_parser_)) {
            std::push_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          } else {
            size--;
          }
        }
      }
    }
    ReadAuxDataFromKernelBuffer(&has_data);
    if (!has_data) {
      break;
    }
    if (!SendDataNotificationToMainThread()) {
      return false;
    }
    // If there are no commands, we can loop until there is no more data from the kernel.
  } while (GetCmd() == NO_CMD);
  return true;
}

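// In a sample record, the PERF_SAMPLE_STACK_USER data is laid out as:
//   u64 size; char data[size]; u64 dyn_size;  // dyn_size is only present when size != 0.
// size is the fixed dump size requested via attr.sample_stack_user, while dyn_size is how many
// bytes of it the thread's stack actually used. Cutting a sample shrinks size to
// min(dyn_size, limit) and rewrites both length fields.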
void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_record_reader) {
  const perf_event_header& header = kernel_record_reader->RecordHeader();
  if (header.type == PERF_RECORD_SAMPLE && stack_size_in_sample_record_ > 1024) {
    size_t free_size = record_buffer_.GetFreeSize();
    if (free_size < record_buffer_critical_level_) {
      // When the free size in record buffer is below critical level, drop sample records to save
      // space for more important records (like mmap or fork records).
      stat_.lost_samples++;
      return;
    }
    size_t stack_size_limit = stack_size_in_sample_record_;
    if (free_size < record_buffer_low_level_) {
      // When the free size in record buffer is below low level, cut the stack data in sample
      // records to 1K. This makes the unwinder unwind only part of the callchains, but hopefully
      // the call chain joiner can complete the callchains.
      stack_size_limit = 1024;
    }
    size_t stack_size_pos =
        record_parser_.GetStackSizePos([&](size_t pos, size_t size, void* dest) {
          return kernel_record_reader->ReadRecord(pos, size, dest);
        });
    uint64_t stack_size;
    kernel_record_reader->ReadRecord(stack_size_pos, sizeof(stack_size), &stack_size);
    if (stack_size > 0) {
      size_t dyn_stack_size_pos = stack_size_pos + sizeof(stack_size) + stack_size;
      uint64_t dyn_stack_size;
      kernel_record_reader->ReadRecord(dyn_stack_size_pos, sizeof(dyn_stack_size),
                                       &dyn_stack_size);
      if (dyn_stack_size == 0) {
        // If stack_user_data.dyn_size == 0, it may be because the kernel misses the patch to
        // update dyn_size, like in N9 (See b/22612370). So assume all stack data is valid if
        // dyn_size == 0.
        // TODO: Add cts test.
        dyn_stack_size = stack_size;
      }
      // When simpleperf requests the kernel to dump 64K stack per sample, it will allocate 64K
      // space in each sample to store stack data. However, a thread may use less stack than 64K.
      // So not all the 64K stack data in a sample is valid, and we only need to keep valid stack
      // data, whose size is dyn_stack_size.
      uint64_t new_stack_size = std::min<uint64_t>(dyn_stack_size, stack_size_limit);
      if (stack_size > new_stack_size) {
        // Remove part of the stack data.
        perf_event_header new_header = header;
        new_header.size -= stack_size - new_stack_size;
        char* p = record_buffer_.AllocWriteSpace(new_header.size);
        if (p != nullptr) {
          memcpy(p, &new_header, sizeof(new_header));
          size_t pos = sizeof(new_header);
          kernel_record_reader->ReadRecord(pos, stack_size_pos - pos, p + pos);
          memcpy(p + stack_size_pos, &new_stack_size, sizeof(uint64_t));
          pos = stack_size_pos + sizeof(uint64_t);
          kernel_record_reader->ReadRecord(pos, new_stack_size, p + pos);
          memcpy(p + pos + new_stack_size, &new_stack_size, sizeof(uint64_t));
          record_buffer_.FinishWrite();
          if (new_stack_size < dyn_stack_size) {
            stat_.cut_stack_samples++;
          }
        } else {
          stat_.lost_samples++;
        }
        return;
      }
    }
  }
  char* p = record_buffer_.AllocWriteSpace(header.size);
  if (p != nullptr) {
    kernel_record_reader->ReadRecord(0, header.size, p);
    record_buffer_.FinishWrite();
  } else {
    if (header.type == PERF_RECORD_SAMPLE) {
      stat_.lost_samples++;
    } else {
      stat_.lost_non_samples++;
    }
  }
}

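// The AUX area used by ETM events is a separate ring buffer, so a read may return the data in
// up to two chunks when it wraps. Both chunks are copied into the record buffer right after an
// AuxTraceRecord header, zero-padded up to the 8-byte-aligned size recorded in the header.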
void RecordReadThread::ReadAuxDataFromKernelBuffer(bool* has_data) {
  for (auto& reader : kernel_record_readers_) {
    EventFd* event_fd = reader.GetEventFd();
    if (event_fd->HasAuxBuffer()) {
      char* buf[2];
      size_t size[2];
      uint64_t offset = event_fd->GetAvailableAuxData(&buf[0], &size[0], &buf[1], &size[1]);
      size_t aux_size = size[0] + size[1];
      if (aux_size == 0) {
        continue;
      }
      *has_data = true;
      AuxTraceRecord auxtrace(Align(aux_size, 8), offset, event_fd->Cpu(), 0, event_fd->Cpu());
      size_t alloc_size = auxtrace.size() + auxtrace.data->aux_size;
      if (record_buffer_.GetFreeSize() < alloc_size + record_buffer_critical_level_) {
        stat_.lost_aux_data_size += aux_size;
      } else {
        char* p = record_buffer_.AllocWriteSpace(alloc_size);
        CHECK(p != nullptr);
        MoveToBinaryFormat(auxtrace.Binary(), auxtrace.size(), p);
        MoveToBinaryFormat(buf[0], size[0], p);
        if (size[1] != 0) {
          MoveToBinaryFormat(buf[1], size[1], p);
        }
        size_t pad_size = auxtrace.data->aux_size - aux_size;
        if (pad_size != 0) {
          uint64_t pad = 0;
          memcpy(p, &pad, pad_size);
        }
        record_buffer_.FinishWrite();
        stat_.aux_data_size += aux_size;
        LOG(DEBUG) << "record aux data " << aux_size << " bytes";
      }
      event_fd->DiscardAuxData(aux_size);
    }
  }
}

bool RecordReadThread::SendDataNotificationToMainThread() {
  if (!has_data_notification_.load(std::memory_order_relaxed)) {
    has_data_notification_ = true;
    char dummy = 0;
    if (TEMP_FAILURE_RETRY(write(write_data_fd_, &dummy, 1)) != 1) {
      PLOG(ERROR) << "write";
      return false;
    }
  }
  return true;
}

}  // namespace simpleperf