1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define _GNU_SOURCE 1
18 #include <dirent.h>
19 #include <dlfcn.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/ptrace.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <sys/wait.h>
33 #include <time.h>
34 #include <ucontext.h>
35 #include <unistd.h>
36 
37 #include <algorithm>
38 #include <list>
39 #include <memory>
40 #include <ostream>
41 #include <string>
42 #include <vector>
43 
44 #include <backtrace/Backtrace.h>
45 #include <backtrace/BacktraceMap.h>
46 
47 #include <android-base/macros.h>
48 #include <android-base/stringprintf.h>
49 #include <android-base/unique_fd.h>
50 #include <cutils/atomic.h>
51 #include <cutils/threads.h>
52 
53 #include <gtest/gtest.h>
54 
55 // For the THREAD_SIGNAL definition.
56 #include "BacktraceCurrent.h"
57 #include "backtrace_testlib.h"
58 #include "thread_utils.h"
59 
60 // Number of microseconds per milliseconds.
61 #define US_PER_MSEC             1000
62 
63 // Number of nanoseconds in a second.
64 #define NS_PER_SEC              1000000000ULL
65 
66 // Number of simultaneous dumping operations to perform.
67 #define NUM_THREADS  40
68 
69 // Number of simultaneous threads running in our forked process.
70 #define NUM_PTRACE_THREADS 5
71 
72 // The list of shared libaries that make up the backtrace library.
// The list of shared libraries that make up the backtrace library.
// Used by the local_*_frames tests to check which frames a local unwind
// skips or includes.
static std::vector<std::string> kBacktraceLibs{"libunwindstack.so", "libbacktrace.so"};

// Describes one spinning runner thread created by the tests.
struct thread_t {
  pid_t tid;            // Kernel thread id, filled in by the thread itself.
  int32_t state;        // Flag: set non-zero once the thread is running; the
                        // test clears it to release the thread's spin loop.
  pthread_t threadId;   // pthread handle for the thread.
  void* data;           // Optional per-test payload.
};

// State shared between a dumper thread (ThreadDump) and the test driving it.
struct dump_thread_t {
  thread_t thread;       // The runner thread being unwound.
  BacktraceMap* map;     // Optional shared map (may be nullptr).
  Backtrace* backtrace;  // Result of the unwind, created by ThreadDump.
  int32_t* now;          // Start flag shared by all dumpers.
  int32_t done;          // Set non-zero by the dumper when finished.
};

// Factory signatures matching Backtrace::Create / BacktraceMap::Create so
// verification helpers can re-create backtraces/maps the same way the test did.
typedef Backtrace* (*create_func_t)(pid_t, pid_t, BacktraceMap*);
typedef BacktraceMap* (*map_create_func_t)(pid_t, bool);

static void VerifyLevelDump(Backtrace* backtrace, create_func_t create_func = nullptr,
                            map_create_func_t map_func = nullptr);
static void VerifyMaxDump(Backtrace* backtrace, create_func_t create_func = nullptr,
                          map_create_func_t map_func = nullptr);
97 
NanoTime()98 static uint64_t NanoTime() {
99   struct timespec t = { 0, 0 };
100   clock_gettime(CLOCK_MONOTONIC, &t);
101   return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
102 }
103 
DumpFrames(Backtrace * backtrace)104 static std::string DumpFrames(Backtrace* backtrace) {
105   if (backtrace->NumFrames() == 0) {
106     return "   No frames to dump.\n";
107   }
108 
109   std::string frame;
110   for (size_t i = 0; i < backtrace->NumFrames(); i++) {
111     frame += "   " + backtrace->FormatFrameData(i) + '\n';
112   }
113   return frame;
114 }
115 
WaitForStop(pid_t pid)116 static void WaitForStop(pid_t pid) {
117   uint64_t start = NanoTime();
118 
119   siginfo_t si;
120   while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
121     if ((NanoTime() - start) > NS_PER_SEC) {
122       printf("The process did not get to a stopping point in 1 second.\n");
123       break;
124     }
125     usleep(US_PER_MSEC);
126   }
127 }
128 
CreateRemoteProcess(pid_t * pid)129 static void CreateRemoteProcess(pid_t* pid) {
130   if ((*pid = fork()) == 0) {
131     while (true)
132       ;
133     _exit(0);
134   }
135   ASSERT_NE(-1, *pid);
136 
137   ASSERT_TRUE(ptrace(PTRACE_ATTACH, *pid, 0, 0) == 0);
138 
139   // Wait for the process to get to a stopping point.
140   WaitForStop(*pid);
141 }
142 
FinishRemoteProcess(pid_t pid)143 static void FinishRemoteProcess(pid_t pid) {
144   ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
145 
146   kill(pid, SIGKILL);
147   ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
148 }
149 
#if !defined(__ANDROID__) || defined(__arm__)
// On host and arm target we aren't guaranteed that we will terminate cleanly.
// Accept the benign "missing unwind info / missing map" errors as success.
#define VERIFY_NO_ERROR(error_code)                               \
  ASSERT_TRUE(error_code == BACKTRACE_UNWIND_NO_ERROR ||          \
              error_code == BACKTRACE_UNWIND_ERROR_UNWIND_INFO || \
              error_code == BACKTRACE_UNWIND_ERROR_MAP_MISSING)   \
      << "Unknown error code " << std::to_string(error_code);
#else
// On other Android targets a clean unwind must report no error at all.
#define VERIFY_NO_ERROR(error_code) ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, error_code);
#endif
160 
ReadyLevelBacktrace(Backtrace * backtrace)161 static bool ReadyLevelBacktrace(Backtrace* backtrace) {
162   // See if test_level_four is in the backtrace.
163   bool found = false;
164   for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
165     if (it->func_name == "test_level_four") {
166       found = true;
167       break;
168     }
169   }
170 
171   return found;
172 }
173 
VerifyLevelDump(Backtrace * backtrace,create_func_t,map_create_func_t)174 static void VerifyLevelDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
175   ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
176     << DumpFrames(backtrace);
177   ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
178     << DumpFrames(backtrace);
179 
180   // Look through the frames starting at the highest to find the
181   // frame we want.
182   size_t frame_num = 0;
183   for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
184     if (backtrace->GetFrame(i)->func_name == "test_level_one") {
185       frame_num = i;
186       break;
187     }
188   }
189   ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
190   ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);
191 
192   ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
193     << DumpFrames(backtrace);
194   ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
195     << DumpFrames(backtrace);
196   ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
197     << DumpFrames(backtrace);
198   ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
199     << DumpFrames(backtrace);
200 }
201 
VerifyLevelBacktrace(void *)202 static void VerifyLevelBacktrace(void*) {
203   std::unique_ptr<Backtrace> backtrace(
204       Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
205   ASSERT_TRUE(backtrace.get() != nullptr);
206   ASSERT_TRUE(backtrace->Unwind(0));
207   VERIFY_NO_ERROR(backtrace->GetError().error_code);
208 
209   VerifyLevelDump(backtrace.get());
210 }
211 
ReadyMaxBacktrace(Backtrace * backtrace)212 static bool ReadyMaxBacktrace(Backtrace* backtrace) {
213   return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
214 }
215 
VerifyMaxDump(Backtrace * backtrace,create_func_t,map_create_func_t)216 static void VerifyMaxDump(Backtrace* backtrace, create_func_t, map_create_func_t) {
217   ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
218     << DumpFrames(backtrace);
219   // Verify that the last frame is our recursive call.
220   ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
221     << DumpFrames(backtrace);
222 }
223 
VerifyMaxBacktrace(void *)224 static void VerifyMaxBacktrace(void*) {
225   std::unique_ptr<Backtrace> backtrace(
226       Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
227   ASSERT_TRUE(backtrace.get() != nullptr);
228   ASSERT_TRUE(backtrace->Unwind(0));
229   ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);
230 
231   VerifyMaxDump(backtrace.get());
232 }
233 
// Thread callback: publishes that the thread is running by setting
// thread->state to 1, then busy-waits until the test clears the flag.
static void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  // Spin on a plain (non-atomic) re-read of state; the volatile counter
  // keeps the loop body from being optimized away.
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}
242 
WaitForNonZero(int32_t * value,uint64_t seconds)243 static bool WaitForNonZero(int32_t* value, uint64_t seconds) {
244   uint64_t start = NanoTime();
245   do {
246     if (android_atomic_acquire_load(value)) {
247       return true;
248     }
249   } while ((NanoTime() - start) < seconds * NS_PER_SEC);
250   return false;
251 }
252 
TEST(libbacktrace, local_no_unwind_frames) {
  // Verify that a local unwind does not include any frames within
  // libunwind or libbacktrace.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  for (const auto& frame : *backtrace) {
    if (!BacktraceMap::IsValid(frame.map)) {
      continue;
    }
    // None of the frames should come from the backtrace libraries.
    const std::string lib_name = basename(frame.map.name.c_str());
    for (const auto& lib : kBacktraceLibs) {
      ASSERT_TRUE(lib_name != lib) << DumpFrames(backtrace.get());
    }
  }
}
272 
TEST(libbacktrace, local_unwind_frames) {
  // Verify that a local unwind with the skip frames disabled does include
  // frames within the backtrace libraries.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  backtrace->SetSkipFrames(false);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  // Find the first frame that is NOT in one of the backtrace libraries;
  // since skipping is disabled it must not be frame 0.
  size_t first_frame_non_backtrace_lib = 0;
  for (const auto& frame : *backtrace) {
    if (!BacktraceMap::IsValid(frame.map)) {
      continue;
    }
    const std::string lib_name = basename(frame.map.name.c_str());
    if (std::find(kBacktraceLibs.begin(), kBacktraceLibs.end(), lib_name) ==
        kBacktraceLibs.end()) {
      first_frame_non_backtrace_lib = frame.num;
      break;
    }
  }

  ASSERT_NE(0U, first_frame_non_backtrace_lib) << "No frames found in backtrace libraries:\n"
                                               << DumpFrames(backtrace.get());
}
304 
// Run a local unwind from deep inside the test call chain.
TEST(libbacktrace, local_trace) {
  int level_ret = test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr);
  ASSERT_NE(level_ret, 0);
}
308 
// Checks that unwinds of the same stack with 1 and 2 ignored frames line
// up with the full unwind: bt_ign1 has one frame fewer than bt_all, and
// bt_ign2 two fewer. If |cur_proc| is non-null, frame-by-frame comparison
// only starts after the named function appears in bt_ign2 (the frames
// above it come from separate unwind calls and legitimately differ).
static void VerifyIgnoreFrames(Backtrace* bt_all, Backtrace* bt_ign1, Backtrace* bt_ign2,
                               const char* cur_proc) {
  ASSERT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1) << "All backtrace:\n"
                                                           << DumpFrames(bt_all)
                                                           << "Ignore 1 backtrace:\n"
                                                           << DumpFrames(bt_ign1);
  ASSERT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2) << "All backtrace:\n"
                                                           << DumpFrames(bt_all)
                                                           << "Ignore 2 backtrace:\n"
                                                           << DumpFrames(bt_ign2);

  // Check all of the frames are the same > the current frame.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      // bt_ign2 frame i corresponds to bt_ign1 frame i+1 ...
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      // ... and to bt_all frame i+2.
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    // Checked after the comparison, so the cur_proc frame itself is the
    // first frame NOT compared; comparison starts at the next frame.
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}
337 
VerifyLevelIgnoreFrames(void *)338 static void VerifyLevelIgnoreFrames(void*) {
339   std::unique_ptr<Backtrace> all(
340       Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
341   ASSERT_TRUE(all.get() != nullptr);
342   ASSERT_TRUE(all->Unwind(0));
343   VERIFY_NO_ERROR(all->GetError().error_code);
344 
345   std::unique_ptr<Backtrace> ign1(
346       Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
347   ASSERT_TRUE(ign1.get() != nullptr);
348   ASSERT_TRUE(ign1->Unwind(1));
349   VERIFY_NO_ERROR(ign1->GetError().error_code);
350 
351   std::unique_ptr<Backtrace> ign2(
352       Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
353   ASSERT_TRUE(ign2.get() != nullptr);
354   ASSERT_TRUE(ign2->Unwind(2));
355   VERIFY_NO_ERROR(ign2->GetError().error_code);
356 
357   VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
358 }
359 
// Local unwinds that ignore 1 and 2 frames must stay consistent with the
// full unwind.
TEST(libbacktrace, local_trace_ignore_frames) {
  int level_ret = test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr);
  ASSERT_NE(level_ret, 0);
}
363 
// Recurse past the frame limit and verify the local unwind is capped.
TEST(libbacktrace, local_max_trace) {
  int recurse_ret = test_recursive_call(MAX_BACKTRACE_FRAMES + 10, VerifyMaxBacktrace, nullptr);
  ASSERT_NE(recurse_ret, 0);
}
367 
// Repeatedly attaches to |pid| (or to |tid| when tid >= 0) with ptrace,
// unwinds it using the given factory functions, and checks the result:
//  - ReadyFunc decides whether the target has reached the interesting
//    state (e.g. deep enough in the test call chain).
//  - VerifyFunc performs the actual assertions once ready.
// Retries for up to 5 seconds before failing with the last dump seen.
static void VerifyProcTest(pid_t pid, pid_t tid, bool (*ReadyFunc)(Backtrace*),
                           void (*VerifyFunc)(Backtrace*, create_func_t, map_create_func_t),
                           create_func_t create_func, map_create_func_t map_create_func) {
  // A negative tid means "unwind the process itself": ptrace the pid.
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      map.reset(map_create_func(pid, false));
      std::unique_ptr<Backtrace> backtrace(create_func(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get(), create_func, map_create_func);
        verified = true;
      } else {
        // Not ready yet: keep the dump around for the failure message.
        last_dump = DumpFrames(backtrace.get());
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}
404 
TEST(libbacktrace, ptrace_trace) {
  // Fork a child that winds up the known call chain, then verify a remote
  // unwind of it via ptrace.
  pid_t child = fork();
  if (child == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(child, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyLevelDump,
                 Backtrace::Create, BacktraceMap::Create);

  kill(child, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(child, &status, 0), child);
}
418 
TEST(libbacktrace, ptrace_max_trace) {
  // Fork a child that recurses past the frame limit, then verify the
  // remote unwind is capped at MAX_BACKTRACE_FRAMES.
  pid_t child = fork();
  if (child == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES + 10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(child, BACKTRACE_CURRENT_THREAD, ReadyMaxBacktrace, VerifyMaxDump,
                 Backtrace::Create, BacktraceMap::Create);

  kill(child, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(child, &status, 0), child);
}
432 
VerifyProcessIgnoreFrames(Backtrace * bt_all,create_func_t create_func,map_create_func_t map_create_func)433 static void VerifyProcessIgnoreFrames(Backtrace* bt_all, create_func_t create_func,
434                                       map_create_func_t map_create_func) {
435   std::unique_ptr<BacktraceMap> map(map_create_func(bt_all->Pid(), false));
436   std::unique_ptr<Backtrace> ign1(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
437   ASSERT_TRUE(ign1.get() != nullptr);
438   ASSERT_TRUE(ign1->Unwind(1));
439   VERIFY_NO_ERROR(ign1->GetError().error_code);
440 
441   std::unique_ptr<Backtrace> ign2(create_func(bt_all->Pid(), BACKTRACE_CURRENT_THREAD, map.get()));
442   ASSERT_TRUE(ign2.get() != nullptr);
443   ASSERT_TRUE(ign2->Unwind(2));
444   VERIFY_NO_ERROR(ign2->GetError().error_code);
445 
446   VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
447 }
448 
TEST(libbacktrace, ptrace_ignore_frames) {
  // Fork a child in a known call chain and verify ignore-frame unwinds of
  // it stay consistent with the full unwind.
  pid_t child = fork();
  if (child == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(child, BACKTRACE_CURRENT_THREAD, ReadyLevelBacktrace, VerifyProcessIgnoreFrames,
                 Backtrace::Create, BacktraceMap::Create);

  kill(child, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(child, &status, 0), child);
}
462 
463 // Create a process with multiple threads and dump all of the threads.
PtraceThreadLevelRun(void *)464 static void* PtraceThreadLevelRun(void*) {
465   EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
466   return nullptr;
467 }
468 
GetThreads(pid_t pid,std::vector<pid_t> * threads)469 static void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
470   // Get the list of tasks.
471   char task_path[128];
472   snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
473 
474   std::unique_ptr<DIR, decltype(&closedir)> tasks_dir(opendir(task_path), closedir);
475   ASSERT_TRUE(tasks_dir != nullptr);
476   struct dirent* entry;
477   while ((entry = readdir(tasks_dir.get())) != nullptr) {
478     char* end;
479     pid_t tid = strtoul(entry->d_name, &end, 10);
480     if (*end == '\0') {
481       threads->push_back(tid);
482     }
483   }
484 }
485 
TEST(libbacktrace, ptrace_threads) {
  // Fork a child with NUM_PTRACE_THREADS extra threads (plus the main
  // thread), all spinning inside the test call chain, then unwind each
  // thread remotely via ptrace.
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
      ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  // Iterate with the container's own element type; the original used a
  // std::vector<int> iterator over a std::vector<pid_t>, which only
  // compiled because pid_t happens to be int.
  for (pid_t tid : threads) {
    // Skip the current forked process, we only care about the threads.
    if (pid == tid) {
      continue;
    }
    VerifyProcTest(pid, tid, ReadyLevelBacktrace, VerifyLevelDump, Backtrace::Create,
                   BacktraceMap::Create);
  }

  FinishRemoteProcess(pid);
}
525 
VerifyLevelThread(void *)526 void VerifyLevelThread(void*) {
527   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
528   ASSERT_TRUE(backtrace.get() != nullptr);
529   ASSERT_TRUE(backtrace->Unwind(0));
530   VERIFY_NO_ERROR(backtrace->GetError().error_code);
531 
532   VerifyLevelDump(backtrace.get());
533 }
534 
// Unwind the current thread addressed by explicit pid/tid.
TEST(libbacktrace, thread_current_level) {
  int level_ret = test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr);
  ASSERT_NE(level_ret, 0);
}
538 
VerifyMaxThread(void *)539 static void VerifyMaxThread(void*) {
540   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
541   ASSERT_TRUE(backtrace.get() != nullptr);
542   ASSERT_TRUE(backtrace->Unwind(0));
543   ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, backtrace->GetError().error_code);
544 
545   VerifyMaxDump(backtrace.get());
546 }
547 
// Frame-limit behavior for the explicit pid/tid unwind path.
TEST(libbacktrace, thread_current_max) {
  int recurse_ret = test_recursive_call(MAX_BACKTRACE_FRAMES + 10, VerifyMaxThread, nullptr);
  ASSERT_NE(recurse_ret, 0);
}
551 
ThreadLevelRun(void * data)552 static void* ThreadLevelRun(void* data) {
553   thread_t* thread = reinterpret_cast<thread_t*>(data);
554 
555   thread->tid = gettid();
556   EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
557   return nullptr;
558 }
559 
TEST(libbacktrace, thread_level_trace) {
  // Launch a detached thread that winds up the test call chain and spins.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  // Unwind the spinning thread from this one; the cross-thread unwind is
  // expected to install (and later restore) a THREAD_SIGNAL handler.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  VERIFY_NO_ERROR(backtrace->GetError().error_code);

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
609 
TEST(libbacktrace, thread_ignore_frames) {
  // Launch a detached spinning thread, then unwind it three times with
  // 0, 1 and 2 ignored frames and cross-check the results.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> bt_all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(bt_all.get() != nullptr);
  ASSERT_TRUE(bt_all->Unwind(0));
  VERIFY_NO_ERROR(bt_all->GetError().error_code);

  std::unique_ptr<Backtrace> bt_ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(bt_ign1.get() != nullptr);
  ASSERT_TRUE(bt_ign1->Unwind(1));
  VERIFY_NO_ERROR(bt_ign1->GetError().error_code);

  std::unique_ptr<Backtrace> bt_ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(bt_ign2.get() != nullptr);
  ASSERT_TRUE(bt_ign2->Unwind(2));
  VERIFY_NO_ERROR(bt_ign2->GetError().error_code);

  VerifyIgnoreFrames(bt_all.get(), bt_ign1.get(), bt_ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
642 
ThreadMaxRun(void * data)643 static void* ThreadMaxRun(void* data) {
644   thread_t* thread = reinterpret_cast<thread_t*>(data);
645 
646   thread->tid = gettid();
647   EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
648   return nullptr;
649 }
650 
TEST(libbacktrace, thread_max_trace) {
  // Launch a detached thread recursing past the frame limit, then unwind
  // it and verify the capped dump and the frame-limit error.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t runner = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 2));

  std::unique_ptr<Backtrace> bt(Backtrace::Create(getpid(), runner.tid));
  ASSERT_TRUE(bt.get() != nullptr);
  ASSERT_TRUE(bt->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_EXCEED_MAX_FRAMES_LIMIT, bt->GetError().error_code);

  VerifyMaxDump(bt.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}
673 
ThreadDump(void * data)674 static void* ThreadDump(void* data) {
675   dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
676   while (true) {
677     if (android_atomic_acquire_load(dump->now)) {
678       break;
679     }
680   }
681 
682   // The status of the actual unwind will be checked elsewhere.
683   dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid, dump->map);
684   dump->backtrace->Unwind(0);
685 
686   android_atomic_acquire_store(1, &dump->done);
687 
688   return nullptr;
689 }
690 
// Starts NUM_THREADS spinning runner threads plus NUM_THREADS dumper
// threads, releases all dumpers simultaneously, and verifies every dump.
// When |share_map| is true all dumpers unwind through one shared
// BacktraceMap; otherwise each Backtrace gets a nullptr map.
static void MultipleThreadDumpTest(bool share_map) {
  // Dump NUM_THREADS simultaneously using the same map.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  std::unique_ptr<BacktraceMap> map;
  if (share_map) {
    map.reset(BacktraceMap::Create(getpid()));
  }
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;
    dumpers[i].map = map.get();  // nullptr when not sharing a map.

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}
744 
// Dump many threads at once, with each dumper creating its own map.
TEST(libbacktrace, thread_multiple_dump) {
  MultipleThreadDumpTest(false);
}
748 
// Dump many threads at once through a single shared BacktraceMap.
TEST(libbacktrace, thread_multiple_dump_same_map) {
  MultipleThreadDumpTest(true);
}
752 
753 // This test is for UnwindMaps that should share the same map cursor when
754 // multiple maps are created for the current process at the same time.
// This test is for UnwindMaps that should share the same map cursor when
// multiple maps are created for the current process at the same time.
TEST(libbacktrace, simultaneous_maps) {
  // Hold everything in unique_ptr so nothing leaks if an ASSERT aborts the
  // test early (the original raw new/delete leaked on any failure path).
  std::unique_ptr<BacktraceMap> map1(BacktraceMap::Create(getpid()));
  std::unique_ptr<BacktraceMap> map2(BacktraceMap::Create(getpid()));
  std::unique_ptr<BacktraceMap> map3(BacktraceMap::Create(getpid()));

  std::unique_ptr<Backtrace> back1(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1.get()));
  ASSERT_TRUE(back1 != nullptr);
  EXPECT_TRUE(back1->Unwind(0));
  VERIFY_NO_ERROR(back1->GetError().error_code);
  // Preserve the original destruction order: backtrace, then its map.
  back1.reset();
  map1.reset();

  std::unique_ptr<Backtrace> back2(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2.get()));
  ASSERT_TRUE(back2 != nullptr);
  EXPECT_TRUE(back2->Unwind(0));
  VERIFY_NO_ERROR(back2->GetError().error_code);
  back2.reset();
  map2.reset();

  std::unique_ptr<Backtrace> back3(
      Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3.get()));
  ASSERT_TRUE(back3 != nullptr);
  EXPECT_TRUE(back3->Unwind(0));
  VERIFY_NO_ERROR(back3->GetError().error_code);
  back3.reset();
  map3.reset();
}
781 
TEST(libbacktrace, fillin_erases) {
  // FillIn with a pc that matches no map must clear every field of the
  // entry, leaving it invalid.
  // unique_ptr replaces the raw new/delete so the map cannot leak if this
  // test ever gains an assertion before the cleanup.
  std::unique_ptr<BacktraceMap> back_map(BacktraceMap::Create(getpid()));

  backtrace_map_t map;

  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  // Release the map at the same point the original deleted it.
  back_map.reset();

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uint64_t>(0), map.start);
  ASSERT_EQ(static_cast<uint64_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}
800 
// Exercises Backtrace::FormatFrameData() across the combinations of frame
// information: no map, anonymous map, bracketed map name, named map, function
// name, non-zero function offset, non-zero load bias, and non-zero map offset.
// Expected strings differ only in pc width between 32-bit and 64-bit builds.
TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.rel_pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.pc = 0xb0020;
  frame.rel_pc = 0x20;
  frame.map.start = 0xb0000;
  frame.map.end = 0xbffff;
  frame.map.load_bias = 0;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020  <anonymous:00000000000b0000>",
#else
  EXPECT_EQ("#01 pc 00000020  <anonymous:000b0000>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name begins with a [.
  frame.pc = 0xc0020;
  frame.map.start = 0xc0000;
  frame.map.end = 0xcffff;
  frame.map.load_bias = 0;
  frame.map.name = "[anon:thread signal stack]";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020  [anon:thread signal stack:00000000000c0000]",
#else
  EXPECT_EQ("#01 pc 00000020  [anon:thread signal stack:000c0000]",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.rel_pc = 0x12345678;
  frame.map.name = "MapFake";
  frame.map.start =  1;
  frame.map.end =  1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, func offset is non-zero, and load_bias is non-zero.
  // Note: the formatted output uses rel_pc, which already includes the bias.
  frame.rel_pc = 0x123456dc;
  frame.func_offset = 645;
  frame.map.load_bias = 100;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check a non-zero map offset.
  frame.map.offset = 0x1000;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc  MapFake (offset 0x1000) (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc  MapFake (offset 0x1000) (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}
899 
// Minimal map entry (address range only) parsed from /proc/<pid>/maps,
// used by VerifyMap() to cross-check BacktraceMap contents.
struct map_test_t {
  uint64_t start;
  uint64_t end;
};
904 
map_sort(map_test_t i,map_test_t j)905 static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }
906 
GetTestMapsAsString(const std::vector<map_test_t> & maps)907 static std::string GetTestMapsAsString(const std::vector<map_test_t>& maps) {
908   if (maps.size() == 0) {
909     return "No test map entries\n";
910   }
911   std::string map_txt;
912   for (auto map : maps) {
913     map_txt += android::base::StringPrintf("%" PRIx64 "-%" PRIx64 "\n", map.start, map.end);
914   }
915   return map_txt;
916 }
917 
GetMapsAsString(BacktraceMap * maps)918 static std::string GetMapsAsString(BacktraceMap* maps) {
919   if (maps->size() == 0) {
920     return "No map entries\n";
921   }
922   std::string map_txt;
923   for (const backtrace_map_t* map : *maps) {
924     map_txt += android::base::StringPrintf(
925         "%" PRIx64 "-%" PRIx64 " flags: 0x%x offset: 0x%" PRIx64 " load_bias: 0x%" PRIx64,
926         map->start, map->end, map->flags, map->offset, map->load_bias);
927     if (!map->name.empty()) {
928       map_txt += ' ' + map->name;
929     }
930     map_txt += '\n';
931   }
932   return map_txt;
933 }
934 
VerifyMap(pid_t pid)935 static void VerifyMap(pid_t pid) {
936   char buffer[4096];
937   snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);
938 
939   FILE* map_file = fopen(buffer, "r");
940   ASSERT_TRUE(map_file != nullptr);
941   std::vector<map_test_t> test_maps;
942   while (fgets(buffer, sizeof(buffer), map_file)) {
943     map_test_t map;
944     ASSERT_EQ(2, sscanf(buffer, "%" SCNx64 "-%" SCNx64 " ", &map.start, &map.end));
945     test_maps.push_back(map);
946   }
947   fclose(map_file);
948   std::sort(test_maps.begin(), test_maps.end(), map_sort);
949 
950   std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
951 
952   // Basic test that verifies that the map is in the expected order.
953   auto test_it = test_maps.begin();
954   for (auto it = map->begin(); it != map->end(); ++it) {
955     ASSERT_TRUE(test_it != test_maps.end()) << "Mismatch in number of maps, expected test maps:\n"
956                                             << GetTestMapsAsString(test_maps) << "Actual maps:\n"
957                                             << GetMapsAsString(map.get());
958     ASSERT_EQ(test_it->start, (*it)->start) << "Mismatch in map data, expected test maps:\n"
959                                             << GetTestMapsAsString(test_maps) << "Actual maps:\n"
960                                             << GetMapsAsString(map.get());
961     ASSERT_EQ(test_it->end, (*it)->end) << "Mismatch maps in map data, expected test maps:\n"
962                                         << GetTestMapsAsString(test_maps) << "Actual maps:\n"
963                                         << GetMapsAsString(map.get());
964     // Make sure the load bias get set to a value.
965     ASSERT_NE(static_cast<uint64_t>(-1), (*it)->load_bias) << "Found uninitialized load_bias\n"
966                                                            << GetMapsAsString(map.get());
967     ++test_it;
968   }
969   ASSERT_TRUE(test_it == test_maps.end());
970 }
971 
// Verifies BacktraceMap contents for a ptrace-attached remote process.
TEST(libbacktrace, verify_map_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  FinishRemoteProcess(pid);
}
981 
// Fills 'memory' with a deterministic, nonzero byte pattern (i mod 256,
// with 0 remapped to 23). Readers plant their own '\0' sentinel after the
// expected data, so the pattern itself must never contain a zero byte.
static void InitMemory(uint8_t* memory, size_t bytes) {
  for (size_t idx = 0; idx < bytes; idx++) {
    uint8_t value = static_cast<uint8_t>(idx);
    // Don't use '\0' in our data so we can verify that an overread doesn't
    // occur by using a '\0' as the character after the read data.
    memory[idx] = (value == '\0') ? 23 : value;
  }
}
992 
ThreadReadTest(void * data)993 static void* ThreadReadTest(void* data) {
994   thread_t* thread_data = reinterpret_cast<thread_t*>(data);
995 
996   thread_data->tid = gettid();
997 
998   // Create two map pages.
999   // Mark the second page as not-readable.
1000   size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1001   uint8_t* memory;
1002   if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
1003     return reinterpret_cast<void*>(-1);
1004   }
1005 
1006   if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
1007     return reinterpret_cast<void*>(-1);
1008   }
1009 
1010   // Set up a simple pattern in memory.
1011   InitMemory(memory, pagesize);
1012 
1013   thread_data->data = memory;
1014 
1015   // Tell the caller it's okay to start reading memory.
1016   android_atomic_acquire_store(1, &thread_data->state);
1017 
1018   // Loop waiting for the caller to finish reading the memory.
1019   while (thread_data->state) {
1020   }
1021 
1022   // Re-enable read-write on the page so that we don't crash if we try
1023   // and access data on this page when freeing the memory.
1024   if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
1025     return reinterpret_cast<void*>(-1);
1026   }
1027   free(memory);
1028 
1029   android_atomic_acquire_store(1, &thread_data->state);
1030 
1031   return nullptr;
1032 }
1033 
RunReadTest(Backtrace * backtrace,uint64_t read_addr)1034 static void RunReadTest(Backtrace* backtrace, uint64_t read_addr) {
1035   size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1036 
1037   // Create a page of data to use to do quick compares.
1038   uint8_t* expected = new uint8_t[pagesize];
1039   InitMemory(expected, pagesize);
1040 
1041   uint8_t* data = new uint8_t[2 * pagesize];
1042   // Verify that we can only read one page worth of data.
1043   size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
1044   ASSERT_EQ(pagesize, bytes_read);
1045   ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
1046 
1047   // Verify unaligned reads.
1048   for (size_t i = 1; i < sizeof(word_t); i++) {
1049     bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
1050     ASSERT_EQ(2 * sizeof(word_t), bytes_read);
1051     ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
1052         << "Offset at " << i << " failed";
1053   }
1054 
1055   // Verify small unaligned reads.
1056   for (size_t i = 1; i < sizeof(word_t); i++) {
1057     for (size_t j = 1; j < sizeof(word_t); j++) {
1058       // Set one byte past what we expect to read, to guarantee we don't overread.
1059       data[j] = '\0';
1060       bytes_read = backtrace->Read(read_addr + i, data, j);
1061       ASSERT_EQ(j, bytes_read);
1062       ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
1063           << "Offset at " << i << " length " << j << " miscompared";
1064       ASSERT_EQ('\0', data[j])
1065           << "Offset at " << i << " length " << j << " wrote too much data";
1066     }
1067   }
1068   delete[] data;
1069   delete[] expected;
1070 }
1071 
// Verifies Backtrace::Read() against memory owned by another thread in this
// process: spawn ThreadReadTest, wait for it to publish its tid and buffer,
// run the read checks, then hand control back so the thread can clean up.
TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  // Wait (up to 10s) for the thread to publish its tid and buffer address.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uint64_t>(thread_data.data));

  // Signal the thread that reading is finished...
  android_atomic_acquire_store(0, &thread_data.state);

  // ...and wait for it to confirm it has freed its memory.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}
1091 
// The code requires these variables are the same size.
// g_ready is set to 1 by a forked child once g_addr holds the address of its
// test buffer; the parent polls/reads both remotely via Backtrace::Read(),
// relying on fork() preserving the variables' addresses in the child.
volatile uint64_t g_ready = 0;
volatile uint64_t g_addr = 0;
static_assert(sizeof(g_ready) == sizeof(g_addr), "g_ready/g_addr must be same size");
1096 
ForkedReadTest()1097 static void ForkedReadTest() {
1098   // Create two map pages.
1099   size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
1100   uint8_t* memory;
1101   if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
1102     perror("Failed to allocate memory\n");
1103     exit(1);
1104   }
1105 
1106   // Mark the second page as not-readable.
1107   if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
1108     perror("Failed to mprotect memory\n");
1109     exit(1);
1110   }
1111 
1112   // Set up a simple pattern in memory.
1113   InitMemory(memory, pagesize);
1114 
1115   g_addr = reinterpret_cast<uint64_t>(memory);
1116   g_ready = 1;
1117 
1118   while (1) {
1119     usleep(US_PER_MSEC);
1120   }
1121 }
1122 
// Verifies Backtrace::Read() against a separate process: fork a child running
// ForkedReadTest(), then repeatedly ptrace-attach and remotely poll the
// child's g_ready flag until its buffer is published; run the read checks
// against the child's memory. Gives up after ~5 seconds.
TEST(libbacktrace, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      // g_ready lives at the same address in the child because it was forked
      // from this process; read its current value remotely.
      uint64_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready));
      ASSERT_EQ(sizeof(g_ready), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_addr));
        ASSERT_EQ(sizeof(g_addr), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      // Not ready yet: detach so the child can keep running, then retry.
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  // Always reap the child, even on timeout.
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}
1168 
VerifyFunctionsFound(const std::vector<std::string> & found_functions)1169 static void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
1170   // We expect to find these functions in libbacktrace_test. If we don't
1171   // find them, that's a bug in the memory read handling code in libunwind.
1172   std::list<std::string> expected_functions;
1173   expected_functions.push_back("test_recursive_call");
1174   expected_functions.push_back("test_level_one");
1175   expected_functions.push_back("test_level_two");
1176   expected_functions.push_back("test_level_three");
1177   expected_functions.push_back("test_level_four");
1178   for (const auto& found_function : found_functions) {
1179     for (const auto& expected_function : expected_functions) {
1180       if (found_function == expected_function) {
1181         expected_functions.remove(found_function);
1182         break;
1183       }
1184     }
1185   }
1186   ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
1187 }
1188 
CopySharedLibrary()1189 static const char* CopySharedLibrary() {
1190 #if defined(__LP64__)
1191   const char* lib_name = "lib64";
1192 #else
1193   const char* lib_name = "lib";
1194 #endif
1195 
1196 #if defined(__BIONIC__)
1197   const char* tmp_so_name = "/data/local/tmp/libbacktrace_test.so";
1198   std::string cp_cmd = android::base::StringPrintf("cp /system/%s/libbacktrace_test.so %s",
1199                                                    lib_name, tmp_so_name);
1200 #else
1201   const char* tmp_so_name = "/tmp/libbacktrace_test.so";
1202   if (getenv("ANDROID_HOST_OUT") == NULL) {
1203     fprintf(stderr, "ANDROID_HOST_OUT not set, make sure you run lunch.");
1204     return nullptr;
1205   }
1206   std::string cp_cmd = android::base::StringPrintf("cp %s/%s/libbacktrace_test.so %s",
1207                                                    getenv("ANDROID_HOST_OUT"), lib_name,
1208                                                    tmp_so_name);
1209 #endif
1210 
1211   // Copy the shared so to a tempory directory.
1212   system(cp_cmd.c_str());
1213 
1214   return tmp_so_name;
1215 }
1216 
// Maps a copy of libbacktrace_test.so and then unlinks the file, so the
// unwinder cannot re-open the ELF from disk and must work from the
// in-memory mapping. GetFunctionName() should still resolve the test
// functions across the whole mapping.
TEST(libbacktrace, check_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uint64_t map_size = buf.st_size;

  int fd = open(tmp_so_name, O_RDONLY);
  ASSERT_TRUE(fd != -1);

  void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  ASSERT_TRUE(map != MAP_FAILED);
  close(fd);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  std::vector<std::string> found_functions;
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Needed before GetFunctionName will work.
  backtrace->Unwind(0);

  // Loop through the entire map, and get every function we can find.
  // NOTE: from here on map_size holds the END address of the mapping,
  // not its size.
  map_size += reinterpret_cast<uint64_t>(map);
  std::string last_func;
  for (uint64_t read_addr = reinterpret_cast<uint64_t>(map); read_addr < map_size; read_addr += 4) {
    uint64_t offset;
    std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
    if (!func_name.empty() && last_func != func_name) {
      found_functions.push_back(func_name);
    }
    last_func = func_name;
  }

  // end - start recovers the original mapping size for munmap.
  ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uint64_t>(map)) == 0);

  VerifyFunctionsFound(found_functions);
}
1257 
// Remote variant of check_unreadable_elf_local: a forked child maps the
// library copy and unlinks it, then the parent ptrace-attaches and scans the
// child's mapping for the expected function names via GetFunctionName().
// The child publishes its mapping address through g_addr/g_ready, which the
// parent reads remotely. Gives up after ~5 seconds.
TEST(libbacktrace, check_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  g_ready = 0;

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uint64_t map_size = buf.st_size;

  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: map the copy, remove the file, publish the address, and spin.
    // On any setup failure, exit(0) so the parent times out cleanly.
    int fd = open(tmp_so_name, O_RDONLY);
    if (fd == -1) {
      fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name, strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }

    void* map = mmap(nullptr, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) {
      fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }
    close(fd);
    if (unlink(tmp_so_name) == -1) {
      fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
      exit(0);
    }

    g_addr = reinterpret_cast<uint64_t>(map);
    g_ready = 1;
    while (true) {
      usleep(US_PER_MSEC);
    }
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  std::vector<std::string> found_functions;
  uint64_t start = NanoTime();
  while (true) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);

    // Poll the child's g_ready flag remotely (same address as in the parent
    // because the child was forked from this process).
    uint64_t read_addr;
    ASSERT_EQ(sizeof(g_ready),
              backtrace->Read(reinterpret_cast<uint64_t>(&g_ready),
                              reinterpret_cast<uint8_t*>(&read_addr), sizeof(g_ready)));
    if (read_addr) {
      ASSERT_EQ(sizeof(g_addr),
                backtrace->Read(reinterpret_cast<uint64_t>(&g_addr),
                                reinterpret_cast<uint8_t*>(&read_addr), sizeof(uint64_t)));

      // Needed before GetFunctionName will work.
      backtrace->Unwind(0);

      // Loop through the entire map, and get every function we can find.
      // After this line map_size is the END address of the child's mapping.
      map_size += read_addr;
      std::string last_func;
      for (; read_addr < map_size; read_addr += 4) {
        uint64_t offset;
        std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
        if (!func_name.empty() && last_func != func_name) {
          found_functions.push_back(func_name);
        }
        last_func = func_name;
      }
      break;
    }
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  // Always reap the child, even on timeout.
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  VerifyFunctionsFound(found_functions);
}
1347 
FindFuncFrameInBacktrace(Backtrace * backtrace,uint64_t test_func,size_t * frame_num)1348 static bool FindFuncFrameInBacktrace(Backtrace* backtrace, uint64_t test_func, size_t* frame_num) {
1349   backtrace_map_t map;
1350   backtrace->FillInMap(test_func, &map);
1351   if (!BacktraceMap::IsValid(map)) {
1352     return false;
1353   }
1354 
1355   // Loop through the frames, and find the one that is in the map.
1356   *frame_num = 0;
1357   for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
1358     if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
1359         it->pc >= test_func) {
1360       *frame_num = it->num;
1361       return true;
1362     }
1363   }
1364   return false;
1365 }
1366 
VerifyUnreadableElfFrame(Backtrace * backtrace,uint64_t test_func,size_t frame_num)1367 static void VerifyUnreadableElfFrame(Backtrace* backtrace, uint64_t test_func, size_t frame_num) {
1368   ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
1369     << DumpFrames(backtrace);
1370 
1371   ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
1372   // Make sure that there is at least one more frame above the test func call.
1373   ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);
1374 
1375   uint64_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
1376   ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
1377 }
1378 
VerifyUnreadableElfBacktrace(void * func)1379 static void VerifyUnreadableElfBacktrace(void* func) {
1380   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
1381                                                          BACKTRACE_CURRENT_THREAD));
1382   ASSERT_TRUE(backtrace.get() != nullptr);
1383   ASSERT_TRUE(backtrace->Unwind(0));
1384   VERIFY_NO_ERROR(backtrace->GetError().error_code);
1385 
1386   size_t frame_num;
1387   uint64_t test_func = reinterpret_cast<uint64_t>(func);
1388   ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num))
1389       << DumpFrames(backtrace.get());
1390 
1391   VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
1392 }
1393 
// Signature of test_level_one() from libbacktrace_test.so: four integer
// arguments plus a callback and its argument. Modernized from typedef to a
// `using` alias (the file already relies on C++11+ features).
using test_func_t = int (*)(int, int, int, int, void (*)(void*), void*);
1395 
// Unwinds through code in a shared library whose backing file has been
// deleted: dlopen a copy, unlink it, then call into it with a callback that
// unwinds the current thread and expects to see the library frame.
TEST(libbacktrace, unwind_through_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  // Remove the file so the unwinder cannot re-read the ELF from disk.
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  // The library function invokes VerifyUnreadableElfBacktrace() deep in its
  // call chain and returns nonzero.
  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace, reinterpret_cast<void*>(test_func)),
            0);

  ASSERT_TRUE(dlclose(lib_handle) == 0);
}
1412 
// Remote variant of unwind_through_unreadable_elf_local: a forked child
// loops inside the deleted library while the parent repeatedly attaches,
// unwinds, and looks for the library frame, retrying until the child is
// caught inside the function or ~5 seconds elapse.
TEST(libbacktrace, unwind_through_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: call into the library with no callback; it runs until killed.
    test_func(1, 2, 3, 4, 0, 0);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);
  ASSERT_TRUE(dlclose(lib_handle) == 0);

  uint64_t start = NanoTime();
  bool done = false;
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    VERIFY_NO_ERROR(backtrace->GetError().error_code);

    // The child may not be inside the library function yet; only stop once
    // its frame is found in the unwind.
    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(), reinterpret_cast<uint64_t>(test_func),
                                 &frame_num)) {
      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uint64_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  // Always reap the child, even on timeout.
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}
1465 
// Unwinding a nonexistent tid must fail with the dedicated error code
// rather than crash or succeed.
TEST(libbacktrace, unwind_thread_doesnt_exist) {
  // 99999999 exceeds any plausible tid.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_FALSE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError().error_code);
}
1473 
// GetFunctionName() must work on a local Backtrace object even when
// Unwind() has never been called.
TEST(libbacktrace, local_get_function_name_before_unwind) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Probe an address just inside test_level_one.
  uint64_t offset;
  uint64_t addr_in_func = reinterpret_cast<uint64_t>(&test_level_one) + 1;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(addr_in_func, &offset));
}
1484 
// GetFunctionName() must work on a remote Backtrace object even when
// Unwind() has never been called.
TEST(libbacktrace, remote_get_function_name_before_unwind) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  // Probe an address just inside test_level_one; the child shares our
  // mappings since it was forked from this process.
  uint64_t offset;
  uint64_t addr_in_func = reinterpret_cast<uint64_t>(&test_level_one) + 1;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(addr_in_func, &offset));

  FinishRemoteProcess(pid);
}
1499 
// Stores 'sp' into the stack-pointer register slot of 'ucontext' for the
// build architecture. Fails the test when built for an unsupported arch.
static void SetUcontextSp(uint64_t sp, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_sp = sp;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.sp = sp;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_ESP] = sp;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RSP] = sp;
#else
  UNUSED(sp);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1515 
// Stores 'pc' into the program-counter register slot of 'ucontext' for the
// build architecture. Fails the test when built for an unsupported arch.
static void SetUcontextPc(uint64_t pc, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_pc = pc;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.pc = pc;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_EIP] = pc;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RIP] = pc;
#else
  UNUSED(pc);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1531 
// Stores 'lr' into the link-register slot of 'ucontext' on architectures
// that have one. x86/x86_64 keep the return address on the stack, so those
// branches only sanity-check the arguments. Fails on unsupported arches.
static void SetUcontextLr(uint64_t lr, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_lr = lr;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.regs[30] = lr;
#elif defined(__i386__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#elif defined(__x86_64__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#else
  UNUSED(lr);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1551 
// Size in bytes of the /dev/zero mapping created by SetupDeviceMap(); the
// device-map tests pass this to munmap() when tearing down.
static constexpr size_t DEVICE_MAP_SIZE = 1024;
1553 
SetupDeviceMap(void ** device_map)1554 static void SetupDeviceMap(void** device_map) {
1555   // Make sure that anything in a device map will result in fails
1556   // to read.
1557   android::base::unique_fd device_fd(open("/dev/zero", O_RDONLY | O_CLOEXEC));
1558 
1559   *device_map = mmap(nullptr, 1024, PROT_READ, MAP_PRIVATE, device_fd, 0);
1560   ASSERT_TRUE(*device_map != MAP_FAILED);
1561 
1562   // Make sure the map is readable.
1563   ASSERT_EQ(0, reinterpret_cast<int*>(*device_map)[0]);
1564 }
1565 
// Checks that the unwinder refuses to use memory backed by a device map:
// GetFunctionName() returns "" for addresses inside a PROT_DEVICE_MAP map,
// and an Unwind() whose pc or sp lies in a device map stops after a single
// frame instead of reading device memory.
static void UnwindFromDevice(Backtrace* backtrace, void* device_map) {
  uint64_t device_map_uint = reinterpret_cast<uint64_t>(device_map);

  backtrace_map_t map;
  backtrace->FillInMap(device_map_uint, &map);
  // Verify the flag is set.
  ASSERT_EQ(PROT_DEVICE_MAP, map.flags & PROT_DEVICE_MAP);

  // Quick sanity checks.
  uint64_t offset;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset, &map));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(0, &offset));

  uint64_t cur_func_offset = reinterpret_cast<uint64_t>(&test_level_one) + 1;
  // Now verify the device map flag actually causes the function name to be empty.
  // First confirm the name resolves for a normal map, then force the device
  // flag on and confirm resolution is suppressed.
  backtrace->FillInMap(cur_func_offset, &map);
  ASSERT_TRUE((map.flags & PROT_DEVICE_MAP) == 0);
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));
  map.flags |= PROT_DEVICE_MAP;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));

  ucontext_t ucontext;

  // Create a context that has the pc in the device map, but the sp
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(reinterpret_cast<uint64_t>(&ucontext), &ucontext);
  SetUcontextPc(device_map_uint, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  const backtrace_frame_data_t* frame = backtrace->GetFrame(0);
  ASSERT_EQ(device_map_uint, frame->pc);
  ASSERT_EQ(reinterpret_cast<uint64_t>(&ucontext), frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());

  // Create a context that has the sp in the device map, but the pc
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(device_map_uint, &ucontext);
  SetUcontextPc(cur_func_offset, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  frame = backtrace->GetFrame(0);
  ASSERT_EQ(cur_func_offset, frame->pc);
  ASSERT_EQ(device_map_uint, frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());
}
1628 
// Local-process variant of the device-map restrictions check.
TEST(libbacktrace, unwind_disallow_device_map_local) {
  void* dev_map;
  SetupDeviceMap(&dev_map);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace);

  UnwindFromDevice(backtrace.get(), dev_map);

  munmap(dev_map, DEVICE_MAP_SIZE);
}
1642 
// Verify that a remote (ptrace-attached) unwind refuses to walk through
// a device map. The shared checks live in UnwindFromDevice().
TEST(libbacktrace, unwind_disallow_device_map_remote) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Fork a process to do a remote backtrace.
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
  // Fix: assert creation succeeded before dereferencing, matching the
  // local variant of this test; UnwindFromDevice uses the pointer
  // unconditionally.
  ASSERT_TRUE(backtrace.get() != nullptr);

  UnwindFromDevice(backtrace.get(), device_map);

  FinishRemoteProcess(pid);

  munmap(device_map, DEVICE_MAP_SIZE);
}
1660 
// RAII helper: installs a signal disposition for |signal_number| on
// construction and restores the previous disposition on destruction.
class ScopedSignalHandler {
 public:
  // Install a plain sa_handler style handler.
  ScopedSignalHandler(int signal_number, void (*handler)(int)) : signal_number_(signal_number) {
    struct sigaction action = {};
    action.sa_handler = handler;
    Install(action);
  }

  // Install an SA_SIGINFO style (three-argument) action.
  ScopedSignalHandler(int signal_number, void (*action)(int, siginfo_t*, void*))
      : signal_number_(signal_number) {
    struct sigaction sig_action = {};
    sig_action.sa_flags = SA_SIGINFO;
    sig_action.sa_sigaction = action;
    Install(sig_action);
  }

  // Put back whatever disposition was in effect before construction.
  ~ScopedSignalHandler() { sigaction(signal_number_, &old_action_, nullptr); }

 private:
  // Register |action| for signal_number_, saving the old disposition.
  void Install(const struct sigaction& action) {
    action_ = action;
    sigaction(signal_number_, &action_, &old_action_);
  }

  struct sigaction action_;
  struct sigaction old_action_;
  const int signal_number_;
};
1684 
// Leaf function run inside the forked child: writes 1 through |data|
// (treated as a volatile int*) so the parent can detect this point was
// reached, then spins forever until the parent signals/kills the child.
static void SetValueAndLoop(void* data) {
  volatile int* reached_flag = reinterpret_cast<volatile int*>(data);
  *reached_flag = 1;

  // Spin on a volatile counter so the loop has an observable side
  // effect and cannot be optimized away.
  volatile int spin = 0;
  while (true) {
    ++spin;
  }
}
1692 
// Verifies that a remote unwind can walk from inside a signal handler
// back through the interrupted call chain. Flow:
//   1. Fork a child that runs test_level_one -> ... -> SetValueAndLoop
//      with a SIGUSR1 handler installed (sa_sigaction style when
//      |use_action| is true, plain sa_handler otherwise).
//   2. Poll (ptrace attach + Backtrace::Read of the child's copy of
//      |value| — same address as the parent's, since fork duplicates
//      the address space) until the child reaches the spin loop.
//   3. Send SIGUSR1 and repeatedly unwind until a frame named
//      "test_loop_forever" appears (the handler's loop — see
//      backtrace_testlib), then check the frames beyond the signal
//      frame match the original call chain.
static void UnwindThroughSignal(bool use_action, create_func_t create_func,
                                map_create_func_t map_create_func) {
  volatile int value = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: install the requested handler style, then descend into the
    // nested test calls. SetValueAndLoop never returns.
    if (use_action) {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_action);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    } else {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_handler);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    }
  }
  ASSERT_NE(-1, pid);

  // Parent: wait (bounded at 5s) for the child to set |value|, reading
  // the child's memory remotely while it is ptrace-stopped.
  int read_value = 0;
  uint64_t start = NanoTime();
  while (read_value == 0) {
    usleep(1000);

    // Loop until the remote function gets into the final function.
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    std::unique_ptr<BacktraceMap> map(map_create_func(pid, false));
    std::unique_ptr<Backtrace> backtrace(create_func(pid, pid, map.get()));

    size_t bytes_read = backtrace->Read(reinterpret_cast<uint64_t>(const_cast<int*>(&value)),
                                        reinterpret_cast<uint8_t*>(&read_value), sizeof(read_value));
    ASSERT_EQ(sizeof(read_value), bytes_read);

    // Detach so the child can keep running until the next poll.
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not execute far enough in 5 seconds.";
  }

  // Now need to send a signal to the remote process.
  kill(pid, SIGUSR1);

  // Wait for the process to get to the signal handler loop.
  Backtrace::const_iterator frame_iter;
  start = NanoTime();
  std::unique_ptr<BacktraceMap> map;
  std::unique_ptr<Backtrace> backtrace;
  while (true) {
    usleep(1000);

    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    map.reset(map_create_func(pid, false));
    ASSERT_TRUE(map.get() != nullptr);
    backtrace.reset(create_func(pid, pid, map.get()));
    ASSERT_TRUE(backtrace->Unwind(0));
    bool found = false;
    for (frame_iter = backtrace->begin(); frame_iter != backtrace->end(); ++frame_iter) {
      if (frame_iter->func_name == "test_loop_forever") {
        // Leave frame_iter on the frame *after* test_loop_forever; the
        // checks below start from there.
        ++frame_iter;
        found = true;
        break;
      }
    }
    if (found) {
      // Child stays ptrace-attached here; FinishRemoteProcess() at the
      // end of this function cleans it up.
      break;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not get in signal handler in 5 seconds." << std::endl
        << DumpFrames(backtrace.get());
  }

  std::vector<std::string> names;
  // Loop through the frames, and save the function names.
  size_t frame = 0;
  for (; frame_iter != backtrace->end(); ++frame_iter) {
    if (frame_iter->func_name == "test_level_four") {
      // Record the index (in |names|) of the frame following
      // test_level_four; 0 means it was never found.
      frame = names.size() + 1;
    }
    names.push_back(frame_iter->func_name);
  }
  ASSERT_NE(0U, frame) << "Unable to find test_level_four in backtrace" << std::endl
                       << DumpFrames(backtrace.get());

  // The expected order of the frames:
  //   test_loop_forever
  //   test_signal_handler|test_signal_action
  //   <OPTIONAL_FRAME> May or may not exist.
  //   SetValueAndLoop (but the function name might be empty)
  //   test_level_four
  //   test_level_three
  //   test_level_two
  //   test_level_one
  ASSERT_LE(frame + 2, names.size()) << DumpFrames(backtrace.get());
  ASSERT_LE(2U, frame) << DumpFrames(backtrace.get());
  if (use_action) {
    ASSERT_EQ("test_signal_action", names[0]) << DumpFrames(backtrace.get());
  } else {
    ASSERT_EQ("test_signal_handler", names[0]) << DumpFrames(backtrace.get());
  }
  ASSERT_EQ("test_level_three", names[frame]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_two", names[frame + 1]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_one", names[frame + 2]) << DumpFrames(backtrace.get());

  FinishRemoteProcess(pid);
}
1805 
// Unwind through a signal delivered to a plain sa_handler style handler
// (installed without SA_SIGINFO).
TEST(libbacktrace, unwind_remote_through_signal_using_handler) {
  UnwindThroughSignal(false, Backtrace::Create, BacktraceMap::Create);
}
1809 
// Unwind through a signal delivered to an SA_SIGINFO style
// (three-argument sa_sigaction) handler.
TEST(libbacktrace, unwind_remote_through_signal_using_action) {
  UnwindThroughSignal(true, Backtrace::Create, BacktraceMap::Create);
}
1813 
TestFrameSkipNumbering(create_func_t create_func,map_create_func_t map_create_func)1814 static void TestFrameSkipNumbering(create_func_t create_func, map_create_func_t map_create_func) {
1815   std::unique_ptr<BacktraceMap> map(map_create_func(getpid(), false));
1816   std::unique_ptr<Backtrace> backtrace(create_func(getpid(), gettid(), map.get()));
1817   backtrace->Unwind(1);
1818   ASSERT_NE(0U, backtrace->NumFrames());
1819   ASSERT_EQ(0U, backtrace->GetFrame(0)->num);
1820 }
1821 
// Frame renumbering after a skip, using the default Backtrace/
// BacktraceMap factories on the current thread.
TEST(libbacktrace, unwind_frame_skip_numbering) {
  TestFrameSkipNumbering(Backtrace::Create, BacktraceMap::Create);
}
1825 
1826 #if defined(ENABLE_PSS_TESTS)
1827 #include "GetPss.h"
1828 
1829 #define MAX_LEAK_BYTES (32*1024UL)
1830 
CheckForLeak(pid_t pid,pid_t tid)1831 static void CheckForLeak(pid_t pid, pid_t tid) {
1832   std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
1833 
1834   // Do a few runs to get the PSS stable.
1835   for (size_t i = 0; i < 100; i++) {
1836     Backtrace* backtrace = Backtrace::Create(pid, tid, map.get());
1837     ASSERT_TRUE(backtrace != nullptr);
1838     ASSERT_TRUE(backtrace->Unwind(0));
1839     VERIFY_NO_ERROR(backtrace->GetError().error_code);
1840     delete backtrace;
1841   }
1842   size_t stable_pss = GetPssBytes();
1843   ASSERT_TRUE(stable_pss != 0);
1844 
1845   // Loop enough that even a small leak should be detectable.
1846   for (size_t i = 0; i < 4096; i++) {
1847     Backtrace* backtrace = Backtrace::Create(pid, tid, map.get());
1848     ASSERT_TRUE(backtrace != nullptr);
1849     ASSERT_TRUE(backtrace->Unwind(0));
1850     VERIFY_NO_ERROR(backtrace->GetError().error_code);
1851     delete backtrace;
1852   }
1853   size_t new_pss = GetPssBytes();
1854   ASSERT_TRUE(new_pss != 0);
1855   if (new_pss > stable_pss) {
1856     ASSERT_LE(new_pss - stable_pss, MAX_LEAK_BYTES);
1857   }
1858 }
1859 
// Leak check when unwinding the current thread of the current process.
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}
1863 
// Leak check when unwinding a different thread of the current process.
TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  // ThreadLevelRun (thread_utils) publishes its tid/state through
  // |thread_data| and then spins until told to stop.
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}
1879 
// Leak check when unwinding a separate, ptrace-attached process.
TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  FinishRemoteProcess(pid);
}
1888 #endif
1889