/*
 *
 * honggfuzz - architecture dependent code (LINUX/PERF)
 * -----------------------------------------
 *
 * Author: Robert Swiecki <swiecki@google.com>
 *
 * Copyright 2010-2015 by Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */

#include "common.h"
#include "linux/perf.h"

#include <asm/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/sysctl.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "files.h"
#include "linux/pt.h"
#include "log.h"
#include "util.h"

/*
 * Check Intel PT perf compatibility. The runtime kernel version check is
 * performed in arch_archInit(); compile-time compatibility is handled here.
 * Perf struct version macros are exposed by 'uapi/linux/perf_event.h'.
 */
#ifdef PERF_ATTR_SIZE_VER5
#define _HF_ENABLE_INTEL_PT
#endif

/* mmap'd perf ring buffer, used with BTS/Intel PT (branch recording) */
static __thread uint8_t *perfMmapBuf = NULL;
/* mmap'd AUX area holding the raw BTS/Intel PT trace data */
static __thread uint8_t *perfMmapAux = NULL;
#define _HF_PERF_MAP_SZ (1024 * 128)
#define _HF_PERF_AUX_SZ (1024 * 1024)
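/*
 * _HF_PERF_MAP_SZ covers the data portion of the perf ring buffer (the perf
 * ABI requires it to be a power-of-two number of pages); one extra metadata
 * page (struct perf_event_mmap_page) is added at mmap() time. _HF_PERF_AUX_SZ
 * is the size of the separately mapped AUX area receiving the BTS/PT trace.
 */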
/* Unique branch (path) counter */
__thread uint64_t perfBranchesCnt = 0;
/* Perf method - to be used in signal handlers */
static dynFileMethod_t perfDynamicMethod = _HF_DYNFILE_NONE;
/* Don't record branches with an address above this threshold */
static uint64_t perfCutOffAddr = ~(0ULL);
/* Page size for the current arch */
static size_t perfPageSz = 0x0;
/* PERF_TYPE for Intel PT/BTS, -1 if unavailable */
static int32_t perfIntelPtPerfType = -1;
static int32_t perfIntelBtsPerfType = -1;

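/*
 * Bloom filter used to deduplicate branches/edges; it is backed by
 * hfuzz->bbMap (see arch_perfEnable()), perfBloomSz is its size in bytes.
 */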
__thread size_t perfBloomSz = 0U;
__thread uint8_t *perfBloom = NULL;

static size_t arch_perfCountBranches(void)
{
    return perfBranchesCnt;
}

static inline void arch_perfAddBranch(uint64_t from, uint64_t to)
{
    /*
     * The kernel sometimes reports branches from kernel space (e.g. iret); we are not
     * interested in those, as they make unique branch counting less predictable
     */
    if (__builtin_expect(from > 0xFFFFFFFF00000000, false)
        || __builtin_expect(to > 0xFFFFFFFF00000000, false)) {
        LOG_D("Skipping kernel-space branch %#018" PRIx64 " - %#018" PRIx64, from, to);
        return;
    }
    if (from >= perfCutOffAddr || to >= perfCutOffAddr) {
        return;
    }

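    /*
     * Block-coverage modes key the bloom filter on the branch source address
     * only, while edge mode folds the (from, to) pair together, so different
     * targets taken from the same source are counted as distinct edges.
     */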
    register size_t pos = 0UL;
    if (perfDynamicMethod == _HF_DYNFILE_BTS_BLOCK || perfDynamicMethod == _HF_DYNFILE_IPT_BLOCK) {
        pos = from % (perfBloomSz * 8);
    } else if (perfDynamicMethod == _HF_DYNFILE_BTS_EDGE) {
        pos = (from * to) % (perfBloomSz * 8);
    }

    size_t byteOff = pos / 8;
    uint8_t bitSet = (uint8_t) (1 << (pos % 8));

    register uint8_t prev = __sync_fetch_and_or(&perfBloom[byteOff], bitSet);
    if (!(prev & bitSet)) {
        perfBranchesCnt++;
    }
}

static inline void arch_perfMmapParse(void)
{
    struct perf_event_mmap_page *pem = (struct perf_event_mmap_page *)perfMmapBuf;
#ifdef _HF_ENABLE_INTEL_PT
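    /*
     * aux_head is advanced by the kernel as trace data is written into the AUX
     * area; if it falls behind aux_tail the buffer has wrapped and trace
     * records were lost, which is treated as fatal here.
     */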
    if (pem->aux_head == pem->aux_tail) {
        return;
    }
    if (pem->aux_head < pem->aux_tail) {
        LOG_F("The PERF AUX data has been overwritten. The AUX buffer is too small");
    }

    struct bts_branch {
        uint64_t from;
        uint64_t to;
        uint64_t misc;
    };
    if (perfDynamicMethod == _HF_DYNFILE_BTS_BLOCK) {
        struct bts_branch *br = (struct bts_branch *)perfMmapAux;
        for (; br < ((struct bts_branch *)(perfMmapAux + pem->aux_head)); br++) {
            arch_perfAddBranch(br->from, 0UL);
        }
        return;
    }
    if (perfDynamicMethod == _HF_DYNFILE_BTS_EDGE) {
        struct bts_branch *br = (struct bts_branch *)perfMmapAux;
        for (; br < ((struct bts_branch *)(perfMmapAux + pem->aux_head)); br++) {
            arch_perfAddBranch(br->from, br->to);
        }
        return;
    }
#endif

    arch_ptAnalyze(pem, perfMmapAux, arch_perfAddBranch);
}

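/* glibc provides no wrapper for perf_event_open(2), so invoke it via syscall(2) */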
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd,
                            unsigned long flags)
{
    return syscall(__NR_perf_event_open, hw_event, (uintptr_t) pid, (uintptr_t) cpu,
                   (uintptr_t) group_fd, (uintptr_t) flags);
}

static bool arch_perfOpen(honggfuzz_t * hfuzz, pid_t pid, dynFileMethod_t method, int *perfFd)
{
    LOG_D("Enabling PERF for PID=%d method=%x", pid, method);

    perfDynamicMethod = method;
    perfBranchesCnt = 0;

    if (*perfFd != -1) {
        LOG_F("The PERF FD is already initialized, possibly conflicting perf types enabled");
    }

    if (((method & _HF_DYNFILE_BTS_BLOCK) || (method & _HF_DYNFILE_BTS_EDGE))
        && perfIntelBtsPerfType == -1) {
        LOG_F("Intel BTS events (new type) are not supported on this platform");
    }
    if ((method & _HF_DYNFILE_IPT_BLOCK)
        && perfIntelPtPerfType == -1) {
        LOG_F("Intel PT events are not supported on this platform");
    }

    struct perf_event_attr pe;
    memset(&pe, 0, sizeof(struct perf_event_attr));
    pe.size = sizeof(struct perf_event_attr);
    pe.exclude_kernel = 1;
    pe.exclude_hv = 1;
    pe.exclude_guest = 1;
    pe.exclude_idle = 1;
    pe.exclude_callchain_kernel = 1;
    pe.exclude_callchain_user = 1;
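    /*
     * When attaching to an already-running process (hfuzz->pid > 0) the event
     * must start counting immediately; otherwise it is created disabled and
     * armed with enable_on_exec, so only the fuzzed target (after execve) is
     * measured.
     */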
    if (hfuzz->pid > 0) {
        pe.disabled = 0;
        pe.enable_on_exec = 0;
    } else {
        pe.disabled = 1;
        pe.enable_on_exec = 1;
    }
    pe.type = PERF_TYPE_HARDWARE;
    pe.pinned = 1;
    pe.precise_ip = 1;

    switch (method) {
    case _HF_DYNFILE_INSTR_COUNT:
        LOG_D("Using: PERF_COUNT_HW_INSTRUCTIONS for PID: %d", pid);
        pe.config = PERF_COUNT_HW_INSTRUCTIONS;
        pe.inherit = 1;
        break;
    case _HF_DYNFILE_BRANCH_COUNT:
        LOG_D("Using: PERF_COUNT_HW_BRANCH_INSTRUCTIONS for PID: %d", pid);
        pe.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        pe.inherit = 1;
        break;
    case _HF_DYNFILE_BTS_BLOCK:
    case _HF_DYNFILE_BTS_EDGE:
        LOG_D("Using: (Intel BTS) type=%" PRId32 " for PID: %d", perfIntelBtsPerfType, pid);
        pe.type = perfIntelBtsPerfType;
        break;
    case _HF_DYNFILE_IPT_BLOCK:
        LOG_D("Using: (Intel PT) type=%" PRId32 " for PID: %d", perfIntelPtPerfType, pid);
        pe.type = perfIntelPtPerfType;
        pe.config = (1U << 11); /* Disable RET compression */
        break;
    default:
        LOG_E("Unknown perf mode: '%d' for PID: %d", method, pid);
        return false;
    }

    *perfFd = perf_event_open(&pe, pid, -1, -1, 0);
    if (*perfFd == -1) {
        PLOG_F("perf_event_open() failed");
        return false;
    }

    if (method != _HF_DYNFILE_BTS_BLOCK && method != _HF_DYNFILE_BTS_EDGE
        && method != _HF_DYNFILE_IPT_BLOCK) {
        return true;
    }
#ifdef _HF_ENABLE_INTEL_PT
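    /*
     * First mapping: one metadata page (struct perf_event_mmap_page) followed
     * by _HF_PERF_MAP_SZ bytes of the data ring. The AUX trace area is mapped
     * separately below, at the offset advertised through the metadata page.
     */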
    perfMmapBuf =
        mmap(NULL, _HF_PERF_MAP_SZ + getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED, *perfFd, 0);
    if (perfMmapBuf == MAP_FAILED) {
        perfMmapBuf = NULL;
        PLOG_E("mmap(mmapBuf) failed, sz=%zu, try increasing kernel.perf_event_mlock_kb",
               (size_t) _HF_PERF_MAP_SZ + getpagesize());
        close(*perfFd);
        return false;
    }

    struct perf_event_mmap_page *pem = (struct perf_event_mmap_page *)perfMmapBuf;
    pem->aux_offset = pem->data_offset + pem->data_size;
    pem->aux_size = _HF_PERF_AUX_SZ;
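    /*
     * The AUX area is mapped PROT_READ only; per the perf AUX ABI a read-only
     * mapping selects overwrite mode, so the kernel keeps writing trace data
     * without waiting for user space to advance aux_tail.
     */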
    perfMmapAux = mmap(NULL, pem->aux_size, PROT_READ, MAP_SHARED, *perfFd, pem->aux_offset);
    if (perfMmapAux == MAP_FAILED) {
        munmap(perfMmapBuf, _HF_PERF_MAP_SZ + getpagesize());
        perfMmapBuf = NULL;
        PLOG_E("mmap(mmapAuxBuf) failed, try increasing kernel.perf_event_mlock_kb");
        close(*perfFd);
        return false;
    }
#else                           /* _HF_ENABLE_INTEL_PT */
    LOG_F("Your <linux/perf_event.h> includes are too old to support Intel PT/BTS");
#endif                          /* _HF_ENABLE_INTEL_PT */

    return true;
}

bool arch_perfEnable(pid_t pid, honggfuzz_t * hfuzz, perfFd_t * perfFds)
{
    if (hfuzz->dynFileMethod == _HF_DYNFILE_NONE) {
        return true;
    }

    perfBloom = hfuzz->bbMap;
    perfBloomSz = hfuzz->bbMapSz;

    perfFds->cpuInstrFd = -1;
    perfFds->cpuBranchFd = -1;
    perfFds->cpuIptBtsFd = -1;
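    /*
     * Each requested method gets its own perf descriptor; the BTS and Intel PT
     * modes share cpuIptBtsFd, which is why arch_perfOpen() treats an already
     * initialized fd as a conflict.
     */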

    if (hfuzz->dynFileMethod & _HF_DYNFILE_INSTR_COUNT) {
        if (arch_perfOpen(hfuzz, pid, _HF_DYNFILE_INSTR_COUNT, &perfFds->cpuInstrFd) == false) {
            LOG_E("Cannot set up perf for PID=%d (_HF_DYNFILE_INSTR_COUNT)", pid);
            goto out;
        }
    }
    if (hfuzz->dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) {
        if (arch_perfOpen(hfuzz, pid, _HF_DYNFILE_BRANCH_COUNT, &perfFds->cpuBranchFd) == false) {
            LOG_E("Cannot set up perf for PID=%d (_HF_DYNFILE_BRANCH_COUNT)", pid);
            goto out;
        }
    }
    if (hfuzz->dynFileMethod & _HF_DYNFILE_BTS_BLOCK) {
        if (arch_perfOpen(hfuzz, pid, _HF_DYNFILE_BTS_BLOCK, &perfFds->cpuIptBtsFd) == false) {
            LOG_E("Cannot set up perf for PID=%d (_HF_DYNFILE_BTS_BLOCK)", pid);
            goto out;
        }
    }
    if (hfuzz->dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        if (arch_perfOpen(hfuzz, pid, _HF_DYNFILE_BTS_EDGE, &perfFds->cpuIptBtsFd) == false) {
            LOG_E("Cannot set up perf for PID=%d (_HF_DYNFILE_BTS_EDGE)", pid);
            goto out;
        }
    }
    if (hfuzz->dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        if (arch_perfOpen(hfuzz, pid, _HF_DYNFILE_IPT_BLOCK, &perfFds->cpuIptBtsFd) == false) {
            LOG_E("Cannot set up perf for PID=%d (_HF_DYNFILE_IPT_BLOCK)", pid);
            goto out;
        }
    }

    return true;

 out:
    close(perfFds->cpuInstrFd);
    close(perfFds->cpuBranchFd);
    close(perfFds->cpuIptBtsFd);

    return false;
}

void arch_perfAnalyze(honggfuzz_t * hfuzz, fuzzer_t * fuzzer, perfFd_t * perfFds)
{
    if (hfuzz->dynFileMethod == _HF_DYNFILE_NONE) {
        return;
    }

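    /*
     * Counting events (instructions/branches) are stopped with
     * PERF_EVENT_IOC_DISABLE and their 64-bit totals are retrieved with a
     * plain read() on the event fd.
     */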
    uint64_t instrCount = 0;
    if (hfuzz->dynFileMethod & _HF_DYNFILE_INSTR_COUNT) {
        ioctl(perfFds->cpuInstrFd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(perfFds->cpuInstrFd, &instrCount, sizeof(instrCount)) != sizeof(instrCount)) {
            PLOG_E("read(perfFd='%d') failed", perfFds->cpuInstrFd);
        }
        close(perfFds->cpuInstrFd);
    }

    uint64_t branchCount = 0;
    if (hfuzz->dynFileMethod & _HF_DYNFILE_BRANCH_COUNT) {
        ioctl(perfFds->cpuBranchFd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(perfFds->cpuBranchFd, &branchCount, sizeof(branchCount)) != sizeof(branchCount)) {
            PLOG_E("read(perfFd='%d') failed", perfFds->cpuBranchFd);
        }
        close(perfFds->cpuBranchFd);
    }

    uint64_t newbbCount = 0;
    if (hfuzz->dynFileMethod & _HF_DYNFILE_BTS_BLOCK) {
        close(perfFds->cpuIptBtsFd);
        arch_perfMmapParse();
        newbbCount = arch_perfCountBranches();
    }

    if (hfuzz->dynFileMethod & _HF_DYNFILE_BTS_EDGE) {
        close(perfFds->cpuIptBtsFd);
        arch_perfMmapParse();
        newbbCount = arch_perfCountBranches();
    }

    if (hfuzz->dynFileMethod & _HF_DYNFILE_IPT_BLOCK) {
        close(perfFds->cpuIptBtsFd);
        arch_perfMmapParse();
        newbbCount = arch_perfCountBranches();
    }

    if (perfMmapAux != NULL) {
        munmap(perfMmapAux, _HF_PERF_AUX_SZ);
        perfMmapAux = NULL;
    }
    if (perfMmapBuf != NULL) {
        munmap(perfMmapBuf, _HF_PERF_MAP_SZ + getpagesize());
        perfMmapBuf = NULL;
    }

    fuzzer->hwCnts.cpuInstrCnt = instrCount;
    fuzzer->hwCnts.cpuBranchCnt = branchCount;
    fuzzer->hwCnts.bbCnt = newbbCount;
}

bool arch_perfInit(honggfuzz_t * hfuzz)
{
    perfPageSz = getpagesize();
    perfCutOffAddr = hfuzz->dynamicCutOffAddr;

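    /*
     * Dynamic PMU types for Intel PT/BTS are published by the kernel under
     * /sys/bus/event_source/devices/<pmu>/type; if a file is absent the type
     * stays at -1 and arch_perfOpen() rejects the corresponding method.
     */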
    uint8_t buf[PATH_MAX + 1];
    size_t sz =
        files_readFileToBufMax("/sys/bus/event_source/devices/intel_pt/type", buf, sizeof(buf) - 1);
    if (sz > 0) {
        buf[sz] = '\0';
        perfIntelPtPerfType = (int32_t) strtoul((char *)buf, NULL, 10);
        LOG_D("perfIntelPtPerfType = %" PRId32, perfIntelPtPerfType);
    }
    sz = files_readFileToBufMax("/sys/bus/event_source/devices/intel_bts/type", buf,
                                sizeof(buf) - 1);
    if (sz > 0) {
        buf[sz] = '\0';
        perfIntelBtsPerfType = (int32_t) strtoul((char *)buf, NULL, 10);
        LOG_D("perfIntelBtsPerfType = %" PRId32, perfIntelBtsPerfType);
    }
    return true;
}