// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)

static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP && CONFIG_PROC_FS */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel sleep profiling enabled (shift: %u)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel schedule profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		pr_info("kernel KVM profiling enabled (shift: %u)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %u)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
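
/*
 * Boot-line examples accepted by the parser above, where N sets
 * prof_shift (each counter then covers 2^N bytes of kernel text):
 *
 *	profile=N		CPU-time (clock tick) profiling
 *	profile=schedule,N	profile schedule() call points
 *	profile=sleep,N		sleep profiling (needs CONFIG_SCHEDSTATS)
 *	profile=kvm,N		KVM profiling
 */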

int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static ATOMIC_NOTIFIER_HEAD(task_free_notifier);

int profile_handoff_task(struct task_struct *task)
{
	int ret;

	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPIs all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of the second hashtable is to avoid the cacheline
 * contention that would otherwise occur while flushing pending hits
 * (needed for the accuracy of reported hits) and so resurrect the
 * interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot;
 * each entry holds the number of pending hits for that slot on one
 * cpu. When a hashtable overflows, all pending hits are accounted
 * to their corresponding profile buffer slots with atomic_add() and
 * the hashtable is emptied. Since a single entry may accumulate many
 * pending hits for one slot, this amortizes a number of atomic
 * profile buffer increments likely to be far larger than the number
 * of entries in the hashtable, particularly because the number of
 * distinct profile buffer positions hit during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full table of finite
 * collision chains rather than just pairs of them.
 *
 * -- nyc
 */
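/*
 * Geometry, for concreteness: with 4 KiB pages each per-cpu table
 * holds PAGE_SIZE / sizeof(struct profile_hit) == 512 entries,
 * organized as NR_PROFILE_GRP == 64 groups of PROFILE_GRPSZ == 8.
 */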
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];

		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];

		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

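	/*
	 * "primary" indexes this pc's home group of PROFILE_GRPSZ slots;
	 * "secondary" is an odd multiple of the group size (the low bit
	 * of ~(pc << 1) is always set), so the probe sequence below
	 * visits every group once before wrapping back to the primary.
	 */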
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (cpumask_available(prof_cpu_mask))
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (cpumask_available(prof_cpu_mask))
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !(CONFIG_SMP && CONFIG_PROC_FS) */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !(CONFIG_SMP && CONFIG_PROC_FS) */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
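
/*
 * Byte stream served by /proc/profile (see read_profile() below):
 * the first sizeof(unsigned int) bytes hold the sample step
 * (1 << prof_shift), followed by prof_len hit counters, one per
 * 2^prof_shift bytes of kernel text.
 */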

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct proc_ops prof_cpu_mask_proc_ops = {
	.proc_open	= prof_cpu_mask_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned long sample_step = 1UL << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct proc_ops profile_proc_ops = {
	.proc_read	= read_profile,
	.proc_write	= write_profile,
	.proc_lseek	= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &profile_proc_ops);
	if (!entry) {
		err = -ENOMEM;
		goto err_state_onl;
	}
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */