// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/lockdep.h>
#include <linux/context_tracking.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
static int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
static int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_lockdep_table[] = {
#ifdef CONFIG_PROVE_LOCKING
	{
		.procname	= "prove_locking",
		.data		= &prove_locking,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_PROVE_LOCKING */
#ifdef CONFIG_LOCK_STAT
	{
		.procname	= "lock_stat",
		.data		= &lock_stat,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_LOCK_STAT */
	{ }
};

static __init int kernel_lockdep_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_lockdep_table);
	return 0;
}
late_initcall(kernel_lockdep_sysctls_init);
#endif /* CONFIG_SYSCTL */

DEFINE_PER_CPU(unsigned int, lockdep_recursion);
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);

static __always_inline bool lockdep_enabled(void)
{
	if (!debug_locks)
		return false;

	if (this_cpu_read(lockdep_recursion))
		return false;

	if (current->lockdep_recursion)
		return false;

	return true;
}
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;

static inline void lockdep_lock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	__this_cpu_inc(lockdep_recursion);
	arch_spin_lock(&__lock);
	__owner = current;
}

static inline void lockdep_unlock(void)
{
	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
		return;

	__owner = NULL;
	arch_spin_unlock(&__lock);
	__this_cpu_dec(lockdep_recursion);
}

static inline bool lockdep_assert_locked(void)
{
	return DEBUG_LOCKS_WARN_ON(__owner != current);
}

static struct task_struct *lockdep_selftest_task_struct;


static int graph_lock(void)
{
	lockdep_lock();
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		lockdep_unlock();
		return 0;
	}
	return 1;
}

static inline void graph_unlock(void)
{
	lockdep_unlock();
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	lockdep_unlock();

	return ret;
}
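/*
 * Editorial usage sketch, illustrative only: the canonical pattern for
 * mutating the graph under this lock (compare lockdep_register_key()
 * further below) is:
 *
 *	raw_local_irq_save(flags);
 *	if (!graph_lock())
 *		goto out;	(debugging got disabled concurrently)
 *	... modify the graph, hashes or allocators ...
 *	graph_unlock();
 * out:
 *	raw_local_irq_restore(flags);
 *
 * Interrupts must already be disabled; lockdep_lock() warns otherwise.
 */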
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);

/*
 * All data structures here are protected by the global debug_lock.
 *
 * nr_lock_classes is the number of elements of lock_classes[] that is
 * in use.
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
unsigned long nr_lock_classes;
unsigned long nr_zapped_classes;
unsigned long max_lock_class_idx;
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
	barrier();

	if (!test_bit(class_idx, lock_classes_in_use)) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}

	/*
	 * At this point, if the passed hlock->class_idx is still garbage,
	 * we just have to live with it
	 */
	return lock_classes + class_idx;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
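/*
 * Editorial worked example (assumed numbers): three hold times of 10, 50
 * and 20 units fed to lock_time_inc() leave min = 10, max = 50,
 * total = 80, nr = 3. lock_stats() then uses lock_time_add() to fold the
 * per-cpu lock_time values into one struct lock_class_stats under the
 * same min/max/total/nr rules.
 */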
/*
 * We keep a global list of all lock classes. The list is only accessed with
 * the lockdep spinlock lock held. free_lock_classes is a list with free
 * elements. These elements are linked together by the lock_entry member in
 * struct lock_class.
 */
static LIST_HEAD(all_lock_classes);
static LIST_HEAD(free_lock_classes);

/**
 * struct pending_free - information about data structures about to be freed
 * @zapped: Head of a list with struct lock_class elements.
 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
 *	are about to be freed.
 */
struct pending_free {
	struct list_head zapped;
	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
};

/**
 * struct delayed_free - data structures used for delayed freeing
 *
 * A data structure for delayed freeing of data structures that may be
 * accessed by RCU readers at the time these were freed.
 *
 * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
 * @index:     Index of @pf to which freed data structures are added.
 * @scheduled: Whether or not an RCU callback has been scheduled.
 * @pf:        Array with information about data structures about to be freed.
 */
static struct delayed_free {
	struct rcu_head rcu_head;
	int index;
	int scheduled;
	struct pending_free pf[2];
} delayed_free;

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * the id of held_lock
 */
static inline u16 hlock_id(struct held_lock *hlock)
{
	BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);

	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
}

static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
{
	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
}

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
	u32 k0 = key, k1 = key >> 32;

	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

	return k0 | (u64)k1 << 32;
}
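/*
 * Editorial worked example (illustrative): after acquiring two locks with
 * hlock_id()s h1 and h2, the chain key is
 *
 *	key = iterate_chain_key(iterate_chain_key(INITIAL_CHAIN_KEY, h1), h2);
 *
 * Swapping h1 and h2 yields a different 64-bit key, which is how the
 * chain cache distinguishes different locking orders over the same
 * classes.
 */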
void lockdep_init_task(struct task_struct *task)
{
	task->lockdep_depth = 0; /* no locks held yet */
	task->curr_chain_key = INITIAL_CHAIN_KEY;
	task->lockdep_recursion = 0;
}

static __always_inline void lockdep_recursion_inc(void)
{
	__this_cpu_inc(lockdep_recursion);
}

static __always_inline void lockdep_recursion_finish(void)
{
	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
		__this_cpu_write(lockdep_recursion, 0);
}

void lockdep_set_selftest_task(struct task_struct *task)
{
	lockdep_selftest_task_struct = task;
}

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
	    !strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
	    !strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

unsigned long nr_stack_trace_entries;

#ifdef CONFIG_PROVE_LOCKING
/**
 * struct lock_trace - single stack backtrace
 * @hash_entry:	Entry in a stack_trace_hash[] list.
 * @hash:	jhash() of @entries.
 * @nr_entries:	Number of entries in @entries.
 * @entries:	Actual stack backtrace.
 */
struct lock_trace {
	struct hlist_node	hash_entry;
	u32			hash;
	u32			nr_entries;
	unsigned long		entries[] __aligned(sizeof(unsigned long));
};
#define LOCK_TRACE_SIZE_IN_LONGS				\
	(sizeof(struct lock_trace) / sizeof(unsigned long))
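/*
 * Editorial example (assuming a 64-bit build where sizeof(struct
 * lock_trace) == 24, i.e. LOCK_TRACE_SIZE_IN_LONGS == 3): a trace with
 * nr_entries == 5 consumes 3 + 5 longs of stack_trace[], matching what
 * save_trace() below adds to nr_stack_trace_entries.
 */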
/*
 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
 */
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];

static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
{
	return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
		memcmp(t1->entries, t2->entries,
		       t1->nr_entries * sizeof(t1->entries[0])) == 0;
}

static struct lock_trace *save_trace(void)
{
	struct lock_trace *trace, *t2;
	struct hlist_head *hash_head;
	u32 hash;
	int max_entries;

	BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
	BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);

	trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
		LOCK_TRACE_SIZE_IN_LONGS;

	if (max_entries <= 0) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return NULL;
	}
	trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

	hash = jhash(trace->entries, trace->nr_entries *
		     sizeof(trace->entries[0]), 0);
	trace->hash = hash;
	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
	hlist_for_each_entry(t2, hash_head, hash_entry) {
		if (traces_identical(trace, t2))
			return t2;
	}
	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
	hlist_add_head(&trace->hash_entry, hash_head);

	return trace;
}

/* Return the number of stack traces in the stack_trace[] array. */
u64 lockdep_stack_trace_count(void)
{
	struct lock_trace *trace;
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
			c++;
		}
	}

	return c;
}
/* Return the number of stack hash chains that have at least one stack trace. */
u64 lockdep_stack_hash_count(void)
{
	u64 c = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
		if (!hlist_empty(&stack_trace_hash[i]))
			c++;

	return c;
}
#endif

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

#ifdef CONFIG_PROVE_LOCKING
/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
	[LOCK_USED_READ] = "INITIAL READ USE",
	/* abused as string storage for verify_lock_unused() */
	[LOCK_USAGE_STATES] = "IN-NMI",
};
#endif

const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	/*
	 * The usage character defaults to '.' (i.e., irqs disabled and not in
	 * irq context), which is the safest usage category.
	 */
	char c = '.';

	/*
	 * The order of the following usage checks matters, which will
	 * result in the outcome character as follows:
	 *
	 * - '+': irq is enabled and not in irq context
	 * - '-': in irq context and irq is disabled
	 * - '?': in irq context and irq is enabled
	 */
	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
		c = '+';
		if (class->usage_mask & lock_flag(bit))
			c = '?';
	} else if (class->usage_mask & lock_flag(bit))
		c = '-';

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}
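/*
 * Editorial example (illustrative): a class only ever taken with hardirqs
 * disabled outside irq context keeps '.' in its hardirq slot; one taken
 * with hardirqs enabled shows '+'; one taken in hardirq context shows
 * '-'; and one that has seen both shows '?', the combination the
 * irq-safety checks care about most.
 */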
static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(KERN_CONT "%s", name);
	} else {
		printk(KERN_CONT "%s", name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		if (class->subclass)
			printk(KERN_CONT "/%d", class->subclass);
		if (hlock && class->print_fn)
			class->print_fn(hlock->instance);
	}
}

static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(KERN_CONT " (");
	__print_lock_name(hlock, class);
	printk(KERN_CONT "){%s}-{%d:%d}", usage,
	       class->wait_type_outer ?: class->wait_type_inner,
	       class->wait_type_inner);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk(KERN_CONT "%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 *
	 * If this indeed happens, let's pretend it does not hurt to continue
	 * to print the lock unless the hlock class_idx does not point to a
	 * registered class. The rationale here is: since we don't attempt
	 * to distinguish whether we are in this situation, if it just
	 * happened we can't count on class_idx to tell either.
	 */
	struct lock_class *lock = hlock_class(hlock);

	if (!lock) {
		printk(KERN_CONT "<RELEASED>\n");
		return;
	}

	printk(KERN_CONT "%px", hlock->instance);
	print_lock_name(hlock, lock);
	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *p)
{
	int i, depth = READ_ONCE(p->lockdep_depth);

	if (!depth)
		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
	else
		printk("%d lock%s held by %s/%d:\n", depth,
		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
	/*
	 * It's not reliable to print a task's held locks if it's not sleeping
	 * and it's not the current task.
	 */
	if (p != current && task_is_running(p))
		return;
	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(p->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version,
	       print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
/*
 * Check if an address is part of freed initmem. After initmem is freed,
 * memory can be allocated from it, and such allocations would then have
 * addresses within the range [_stext, _end].
 */
#ifndef arch_is_kernel_initmem_freed
static int arch_is_kernel_initmem_freed(unsigned long addr)
{
	if (system_state < SYSTEM_FREEING_INITMEM)
		return 0;

	return init_section_contains((void *)addr, 1);
}
#endif

static int static_obj(const void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	if (arch_is_kernel_initmem_freed(addr))
		return 0;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif
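/*
 * Editorial examples (assumed): a lock defined with DEFINE_SPINLOCK() in
 * kernel or module .data satisfies static_obj(), so its own address can
 * serve as the class key. A lock embedded in kmalloc()ed memory does not,
 * and needs a key that is either static or registered via
 * lockdep_register_key() below.
 */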
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter. The caller must hold the graph
 * lock.
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/* used from NMI context -- must be lockless */
static noinstr struct lock_class *
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		instrumentation_begin();
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		instrumentation_end();
		return NULL;
	}

	/*
	 * If it is not initialised then it has never been locked,
	 * so it won't be present in the hash table.
	 */
	if (unlikely(!lock->key))
		return NULL;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ONCE(class->name != lock->name &&
				  lock->key != &__lockdep_no_validate__,
				  "Looking for class \"%s\" with key %ps, but found a different class \"%s\" with the same key\n",
				  lock->name, lock->key, class->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Static locks do not have their class-keys yet - for them the key is
 * the lock object itself. If the lock is in the per cpu area, the
 * canonical address of the lock (per cpu offset removed) is used.
 */
static bool assign_lock_key(struct lockdep_map *lock)
{
	unsigned long can_addr, addr = (unsigned long)lock;

#ifdef __KERNEL__
	/*
	 * lockdep_free_key_range() assumes that struct lock_class_key
	 * objects do not overlap. Since we use the address of lock
	 * objects as class key for static objects, check whether the
	 * size of lock_class_key objects does not exceed the size of
	 * the smallest lock object.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
#endif

	if (__is_kernel_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (__is_module_percpu_address(addr, &can_addr))
		lock->key = (void *)can_addr;
	else if (static_obj(lock))
		lock->key = (void *)lock;
	else {
		/* Debug-check: all keys must be persistent! */
		debug_locks_off();
		pr_err("INFO: trying to register non-static key.\n");
		pr_err("The code is fine but needs lockdep annotation, or maybe\n");
		pr_err("you didn't initialize this object before use?\n");
		pr_err("turning off the locking correctness validator.\n");
		dump_stack();
		return false;
	}

	return true;
}
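/*
 * Editorial note on the failure mode above (assumed scenario): a lock
 * living in kmalloc()ed memory whose storage was merely zeroed, never
 * properly initialized, reaches register_lock_class() with
 * lock->key == NULL and !static_obj(lock), and triggers the
 * "trying to register non-static key" splat.
 */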
#ifdef CONFIG_DEBUG_LOCKDEP

/* Check whether element @e occurs in list @h */
static bool in_list(struct list_head *e, struct list_head *h)
{
	struct list_head *f;

	list_for_each(f, h) {
		if (e == f)
			return true;
	}

	return false;
}

/*
 * Check whether entry @e occurs in any of the locks_after or locks_before
 * lists.
 */
static bool in_any_class_list(struct list_head *e)
{
	struct lock_class *class;
	int i;

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (in_list(e, &class->locks_after) ||
		    in_list(e, &class->locks_before))
			return true;
	}
	return false;
}

static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
{
	struct lock_list *e;

	list_for_each_entry(e, h, entry) {
		if (e->links_to != c) {
			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
			       c->name ? : "(?)",
			       (unsigned long)(e - list_entries),
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)",
			       e->class && e->class->name ? e->class->name :
			       "(?)");
			return false;
		}
	}
	return true;
}

#ifdef CONFIG_PROVE_LOCKING
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
#endif

static bool check_lock_chain_key(struct lock_chain *chain)
{
#ifdef CONFIG_PROVE_LOCKING
	u64 chain_key = INITIAL_CHAIN_KEY;
	int i;

	for (i = chain->base; i < chain->base + chain->depth; i++)
		chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
	/*
	 * The 'unsigned long long' casts avoid that a compiler warning
	 * is reported when building tools/lib/lockdep.
	 */
	if (chain->chain_key != chain_key) {
		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
		       (unsigned long long)(chain - lock_chains),
		       (unsigned long long)chain->chain_key,
		       (unsigned long long)chain_key);
		return false;
	}
#endif
	return true;
}

static bool in_any_zapped_class_list(struct lock_class *class)
{
	struct pending_free *pf;
	int i;

	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
		if (in_list(&class->lock_entry, &pf->zapped))
			return true;
	}

	return false;
}
static bool __check_data_structures(void)
{
	struct lock_class *class;
	struct lock_chain *chain;
	struct hlist_head *head;
	struct lock_list *e;
	int i;

	/* Check whether all classes occur in a lock list. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!in_list(&class->lock_entry, &all_lock_classes) &&
		    !in_list(&class->lock_entry, &free_lock_classes) &&
		    !in_any_zapped_class_list(class)) {
			printk(KERN_INFO "class %px/%s is not in any class list\n",
			       class, class->name ? : "(?)");
			return false;
		}
	}

	/* Check whether all classes have valid lock lists. */
	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		class = &lock_classes[i];
		if (!class_lock_list_valid(class, &class->locks_before))
			return false;
		if (!class_lock_list_valid(class, &class->locks_after))
			return false;
	}

	/* Check the chain_key of all lock chains. */
	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
		head = chainhash_table + i;
		hlist_for_each_entry_rcu(chain, head, entry) {
			if (!check_lock_chain_key(chain))
				return false;
		}
	}

	/*
	 * Check whether all list entries that are in use occur in a class
	 * lock list.
	 */
	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (!in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class->name ? : "(?)",
			       e->links_to->name ? : "(?)");
			return false;
		}
	}

	/*
	 * Check whether all list entries that are not in use do not occur in
	 * a class lock list.
	 */
	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
		e = list_entries + i;
		if (in_any_class_list(&e->entry)) {
			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
			       (unsigned int)(e - list_entries),
			       e->class && e->class->name ? e->class->name :
			       "(?)",
			       e->links_to && e->links_to->name ?
			       e->links_to->name : "(?)");
			return false;
		}
	}

	return true;
}

int check_consistency = 0;
module_param(check_consistency, int, 0644);

static void check_data_structures(void)
{
	static bool once = false;

	if (check_consistency && !once) {
		if (!__check_data_structures()) {
			once = true;
			WARN_ON(once);
		}
	}
}

#else /* CONFIG_DEBUG_LOCKDEP */

static inline void check_data_structures(void) { }

#endif /* CONFIG_DEBUG_LOCKDEP */

static void init_chain_block_buckets(void);

/*
 * Initialize the lock_classes[] array elements, the free_lock_classes list
 * and also the delayed_free structure.
 */
static void init_data_structures_once(void)
{
	static bool __read_mostly ds_initialized, rcu_head_initialized;
	int i;

	if (likely(rcu_head_initialized))
		return;

	if (system_state >= SYSTEM_SCHEDULING) {
		init_rcu_head(&delayed_free.rcu_head);
		rcu_head_initialized = true;
	}

	if (ds_initialized)
		return;

	ds_initialized = true;

	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);

	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
		INIT_LIST_HEAD(&lock_classes[i].locks_after);
		INIT_LIST_HEAD(&lock_classes[i].locks_before);
	}
	init_chain_block_buckets();
}

static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
{
	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);

	return lock_keys_hash + hash;
}
/* Register a dynamically allocated key. */
void lockdep_register_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	unsigned long flags;

	if (WARN_ON_ONCE(static_obj(key)))
		return;
	hash_head = keyhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto restore_irqs;
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (WARN_ON_ONCE(k == key))
			goto out_unlock;
	}
	hlist_add_head_rcu(&key->hash_entry, hash_head);
out_unlock:
	graph_unlock();
restore_irqs:
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);

/* Check whether a key has been registered as a dynamic key. */
static bool is_dynamic_key(const struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	bool found = false;

	if (WARN_ON_ONCE(static_obj(key)))
		return false;

	/*
	 * If lock debugging is disabled lock_keys_hash[] may contain
	 * pointers to memory that has already been freed. Avoid triggering
	 * a use-after-free in that case by returning early.
	 */
	if (!debug_locks)
		return true;

	hash_head = keyhashentry(key);

	rcu_read_lock();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
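/*
 * Editorial usage sketch, illustrative only ('struct foo' and its members
 * are made-up names): a dynamically allocated lock wants a dynamically
 * allocated key, registered before use and unregistered before the memory
 * is freed:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	lockdep_register_key(&f->key);
 *	spin_lock_init(&f->lock);
 *	lockdep_set_class(&f->lock, &f->key);
 *	...
 *	lockdep_unregister_key(&f->key);
 *	kfree(f);
 *
 * is_dynamic_key() is what later lets such a heap-allocated key pass the
 * key checks in register_lock_class().
 */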
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;
	int idx;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	if (!lock->key) {
		if (!assign_lock_key(lock))
			return NULL;
	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	init_data_structures_once();

	/* Allocate a new lock class and add it to the hash. */
	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
					 lock_entry);
	if (!class) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	nr_lock_classes++;
	__set_bit(class - lock_classes, lock_classes_in_use);
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	WARN_ON_ONCE(!list_empty(&class->locks_before));
	WARN_ON_ONCE(!list_empty(&class->locks_after));
	class->name_version = count_matching_names(class);
	class->wait_type_inner = lock->wait_type_inner;
	class->wait_type_outer = lock->wait_type_outer;
	class->lock_type = lock->lock_type;
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Remove the class from the free list and add it to the global list
	 * of classes.
	 */
	list_move_tail(&class->lock_entry, &all_lock_classes);
	idx = class - lock_classes;
	if (idx > max_lock_class_idx)
		max_lock_class_idx = idx;

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %px: %s", class->key, class->name);
		if (class->name_version > 1)
			printk(KERN_CONT "#%d", class->name_version);
		printk(KERN_CONT "\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	int idx = find_first_zero_bit(list_entries_in_use,
				      ARRAY_SIZE(list_entries));

	if (idx >= ARRAY_SIZE(list_entries)) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	nr_list_entries++;
	__set_bit(idx, list_entries_in_use);
	return list_entries + idx;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    u16 distance, u8 dep,
			    const struct lock_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->links_to = links_to;
	entry->dep = dep;
	entry->distance = distance;
	entry->trace = trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}
/*
 * For efficient modulo arithmetic, we use a power of 2:
 */
#define MAX_CIRCULAR_QUEUE_SIZE		(1UL << CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS)
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement graph
 * breadth-first search (BFS) algorithm, by which we can determine
 * whether there is a path from a lock to another. In deadlock checks,
 * a path from the next lock to be acquired to a previous held lock
 * indicates that adding the <prev> -> <next> lock dependency will
 * produce a circle in the graph. Breadth-first search instead of
 * depth-first search is used in order to find the shortest (circular)
 * path.
 */
struct circular_queue {
	struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

/*
 * Dequeue an element from the circular_queue, return a lock_list if
 * the queue is not empty, or NULL if otherwise.
 */
static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
{
	struct lock_list * lock;

	if (__cq_empty(cq))
		return NULL;

	lock = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;

	return lock;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
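/*
 * Editorial worked example (assuming CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS
 * == 4): MAX_CIRCULAR_QUEUE_SIZE is 16 and CQ_MASK is 15, so
 * (rear + 1) & CQ_MASK wraps 15 back to 0 without a division. One slot
 * is sacrificed to tell "full" from "empty", so at most 15 elements are
 * queued at once.
 */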
static inline void mark_lock_accessed(struct lock_list *lock)
{
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline void visit_lock_entry(struct lock_list *lock,
				    struct lock_list *parent)
{
	lock->parent = parent;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

/*
 * Return the forward or backward dependency list.
 *
 * @lock:   the lock_list to get its class's dependency list
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 */
static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
{
	void *lock_class = lock->class;

	return lock_class + offset;
}

/*
 * Return values of a bfs search:
 *
 * BFS_E* indicates an error
 * BFS_R* indicates a result (match or not)
 *
 * BFS_EINVALIDNODE: Found an invalid node in the graph.
 *
 * BFS_EQUEUEFULL: The queue is full while doing the bfs.
 *
 * BFS_RMATCH: Found the matched node in the graph, and put that node into
 *             *@target_entry.
 *
 * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry
 *               _unchanged_.
 */
enum bfs_result {
	BFS_EINVALIDNODE = -2,
	BFS_EQUEUEFULL = -1,
	BFS_RMATCH = 0,
	BFS_RNOMATCH = 1,
};

/*
 * bfs_result < 0 means error
 */
static inline bool bfs_error(enum bfs_result res)
{
	return res < 0;
}

/*
 * DEP_*_BIT in lock_list::dep
 *
 * For dependency @prev -> @next:
 *
 *   SR: @prev is shared reader (->read != 0) and @next is recursive reader
 *       (->read == 2)
 *   ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
 *   SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
 *   EN: @prev is exclusive locker and @next is non-recursive locker
 *
 * Note that we define the value of DEP_*_BITs so that:
 *   bit0 is prev->read == 0
 *   bit1 is next->read != 2
 */
#define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
#define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
#define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
#define DEP_EN_BIT (1 + (1 << 1)) /* 3 */

#define DEP_SR_MASK (1U << (DEP_SR_BIT))
#define DEP_ER_MASK (1U << (DEP_ER_BIT))
#define DEP_SN_MASK (1U << (DEP_SN_BIT))
#define DEP_EN_MASK (1U << (DEP_EN_BIT))

static inline unsigned int
__calc_dep_bit(struct held_lock *prev, struct held_lock *next)
{
	return (prev->read == 0) + ((next->read != 2) << 1);
}

static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bit(prev, next);
}

/*
 * calculate the dep_bit for backwards edges. We care about whether @prev is
 * shared and whether @next is recursive.
 */
static inline unsigned int
__calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
{
	return (next->read != 2) + ((prev->read == 0) << 1);
}

static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
{
	return 1U << __calc_dep_bitb(prev, next);
}
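/*
 * Editorial worked example: for an exclusive @prev (->read == 0) and a
 * non-recursive @next (->read != 2), __calc_dep_bit() returns
 * 1 + (1 << 1) == 3 == DEP_EN_BIT, so calc_dep() yields DEP_EN_MASK
 * (0x8). A recursive-read @next with the same @prev gives DEP_ER_BIT
 * instead.
 */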
/*
 * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
 * search.
 */
static inline void __bfs_init_root(struct lock_list *lock,
				   struct lock_class *class)
{
	lock->class = class;
	lock->parent = NULL;
	lock->only_xr = 0;
}

/*
 * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
 * root for a BFS search.
 *
 * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
 * and -(S*)->.
 */
static inline void bfs_init_root(struct lock_list *lock,
				 struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read == 2);
}

/*
 * Similar to bfs_init_root() but initialize the root for backwards BFS.
 *
 * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
 * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
 * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
 */
static inline void bfs_init_rootb(struct lock_list *lock,
				  struct held_lock *hlock)
{
	__bfs_init_root(lock, hlock_class(hlock));
	lock->only_xr = (hlock->read != 0);
}

static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
{
	if (!lock || !lock->parent)
		return NULL;

	return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
				     &lock->entry, struct lock_list, entry);
}

/*
 * Breadth-First Search to find a strong path in the dependency graph.
 *
 * @source_entry: the source of the path we are searching for.
 * @data: data used for the second parameter of @match function
 * @match: match function for the search
 * @target_entry: pointer to the target of a matched path
 * @offset: the offset to struct lock_class to determine whether it is
 *          locks_after or locks_before
 *
 * We may have multiple edges (considering different kinds of dependencies,
 * e.g. ER and SN) between two nodes in the dependency graph. But
 * only the strong dependency path in the graph is relevant to deadlocks. A
 * strong dependency path is a dependency path that doesn't have two adjacent
 * dependencies as -(*R)-> -(S*)->, please see:
 *
 *         Documentation/locking/lockdep-design.rst
 *
 * for more explanation of the definition of strong dependency paths
 *
 * In __bfs(), we only traverse in the strong dependency path:
 *
 *     In lock_list::only_xr, we record whether the previous dependency only
 *     has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
 *     filter out any -(S*)-> in the current dependency and after that, the
 *     ->only_xr is set according to whether we only have -(*R)-> left.
 */
static enum bfs_result __bfs(struct lock_list *source_entry,
			     void *data,
			     bool (*match)(struct lock_list *entry, void *data),
			     bool (*skip)(struct lock_list *entry, void *data),
			     struct lock_list **target_entry,
			     int offset)
{
	struct circular_queue *cq = &lock_cq;
	struct lock_list *lock = NULL;
	struct lock_list *entry;
	struct list_head *head;
	unsigned int cq_depth;
	bool first;

	lockdep_assert_locked();

	__cq_init(cq);
	__cq_enqueue(cq, source_entry);

	while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
		if (!lock->class)
			return BFS_EINVALIDNODE;

		/*
		 * Step 1: check whether we have already finished with this
		 * one.
		 *
		 * If we have visited all the dependencies from this @lock to
		 * others (iow, if we have visited all lock_list entries in
		 * @lock->class->locks_{after,before}) we skip, otherwise go
		 * and visit all the dependencies in the list and mark this
		 * list accessed.
		 */
		if (lock_accessed(lock))
			continue;
		else
			mark_lock_accessed(lock);

		/*
		 * Step 2: check whether prev dependency and this form a strong
		 *         dependency path.
		 */
		if (lock->parent) { /* Parent exists, check prev dependency */
			u8 dep = lock->dep;
			bool prev_only_xr = lock->parent->only_xr;

			/*
			 * Mask out all -(S*)-> if we only have *R in previous
			 * step, because -(*R)-> -(S*)-> don't make up a strong
			 * dependency.
			 */
			if (prev_only_xr)
				dep &= ~(DEP_SR_MASK | DEP_SN_MASK);

			/* If nothing left, we skip */
			if (!dep)
				continue;

			/* If there are only -(*R)-> left, set that for the next step */
			lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
		}

		/*
		 * Step 3: we haven't visited this and there is a strong
		 *         dependency path to this, so check with @match.
		 *         If @skip is provided and returns true, we skip this
		 *         lock (and any path this lock is in).
		 */
		if (skip && skip(lock, data))
			continue;

		if (match(lock, data)) {
			*target_entry = lock;
			return BFS_RMATCH;
		}

		/*
		 * Step 4: if not match, expand the path by adding the
		 *         forward or backwards dependencies in the search
		 *
		 */
		first = true;
		head = get_dep_list(lock, offset);
		list_for_each_entry_rcu(entry, head, entry) {
			visit_lock_entry(entry, lock);

			/*
			 * Note we only enqueue the first of the list into the
			 * queue, because we can always find a sibling
			 * dependency from one (see __bfs_next()), as a result
			 * the space of queue is saved.
			 */
			if (!first)
				continue;

			first = false;

			if (__cq_enqueue(cq, entry))
				return BFS_EQUEUEFULL;

			cq_depth = __cq_get_elem_count(cq);
			if (max_bfs_queue_depth < cq_depth)
				max_bfs_queue_depth = cq_depth;
		}
	}

	return BFS_RNOMATCH;
}

static inline enum bfs_result
__bfs_forwards(struct lock_list *src_entry,
	       void *data,
	       bool (*match)(struct lock_list *entry, void *data),
	       bool (*skip)(struct lock_list *entry, void *data),
	       struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, skip, target_entry,
		     offsetof(struct lock_class, locks_after));

}

static inline enum bfs_result
__bfs_backwards(struct lock_list *src_entry,
		void *data,
		bool (*match)(struct lock_list *entry, void *data),
		bool (*skip)(struct lock_list *entry, void *data),
		struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, skip, target_entry,
		     offsetof(struct lock_class, locks_before));

}
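/*
 * Editorial usage sketch (mirrors __lockdep_count_forward_deps() further
 * below): a caller initializes a root and walks the graph with a match
 * function; a callback that never matches simply visits every reachable
 * entry:
 *
 *	__bfs_init_root(&this, class);
 *	__bfs_forwards(&this, (void *)&count, noop_count, NULL, &target_entry);
 */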
static void print_lock_trace(const struct lock_trace *trace,
			     unsigned int spaces)
{
	stack_trace_print(trace->entries, trace->nr_entries, spaces);
}

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline void
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return;
	printk("\n-> #%u", depth);
	print_lock_name(NULL, target->class);
	printk(KERN_CONT ":\n");
	print_lock_trace(target->trace, 6);
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;
	int src_read = src->read;
	int tgt_read = tgt->read;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) that is not the same
	 * as the safe lock, then the lock chain is used to describe
	 * the problem. Otherwise we would need to show a different
	 * CPU case for each link in the chain from the safe_class
	 * lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(src, source);
		printk(KERN_CONT " --> ");
		__print_lock_name(NULL, parent);
		printk(KERN_CONT " --> ");
		__print_lock_name(tgt, target);
		printk(KERN_CONT "\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	if (tgt_read != 0)
		printk("  rlock(");
	else
		printk("  lock(");
	__print_lock_name(tgt, target);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(NULL, parent);
	printk(KERN_CONT ");\n");
	printk("                               lock(");
	__print_lock_name(tgt, target);
	printk(KERN_CONT ");\n");
	if (src_read != 0)
		printk("  rlock(");
	else if (src->sync)
		printk("  sync(");
	else
		printk("  lock(");
	__print_lock_name(src, source);
	printk(KERN_CONT ");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline void
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			  struct held_lock *check_src,
			  struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return;

	pr_warn("\n");
	pr_warn("======================================================\n");
	pr_warn("WARNING: possible circular locking dependency detected\n");
	print_kernel_ident();
	pr_warn("------------------------------------------------------\n");
	pr_warn("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);

	pr_warn("\nbut task is already holding lock:\n");

	print_lock(check_tgt);
	pr_warn("\nwhich lock already depends on the new lock.\n\n");
	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);
}

/*
 * We are about to add A -> B into the dependency graph, and in __bfs() a
 * strong dependency path A -> .. -> B is found: hlock_class equals
 * entry->class.
 *
 * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
 * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
 * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
 * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
 * dependency graph, as any strong path ..-> A -> B ->.. we can get with
 * having dependency A -> B, we could already get an equivalent path ..-> A ->
 * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
 *
 * We need to make sure both the start and the end of A -> .. -> B is not
 * weaker than A -> B. For the start part, please see the comment in
 * check_redundant(). For the end part, we need:
 *
 * Either
 *
 *     a) A -> B is -(*R)-> (everything is not weaker than that)
 *
 * or
 *
 *     b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
 *
 */
1982  *
1983  * We need to make sure both the start and the end of A -> .. -> B are not
1984  * weaker than A -> B. For the start part, please see the comment in
1985  * check_redundant(). For the end part, we need:
1986  *
1987  * Either
1988  *
1989  * a) A -> B is -(*R)-> (everything is not weaker than that)
1990  *
1991  * or
1992  *
1993  * b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
1994  *
1995  */
1996 static inline bool hlock_equal(struct lock_list *entry, void *data)
1997 {
1998 	struct held_lock *hlock = (struct held_lock *)data;
1999 
2000 	return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
2001 	       (hlock->read == 2 ||  /* A -> B is -(*R)-> */
2002 		!entry->only_xr); /* A -> .. -> B is -(*N)-> */
2003 }
2004 
2005 /*
2006  * We are about to add B -> A into the dependency graph, and in __bfs() a
2007  * strong dependency path A -> .. -> B is found: hlock_class equals
2008  * entry->class.
2009  *
2010  * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
2011  * dependency cycle, that means:
2012  *
2013  * Either
2014  *
2015  * a) B -> A is -(E*)->
2016  *
2017  * or
2018  *
2019  * b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
2020  *
2021  * as then we don't have -(*R)-> -(S*)-> in the cycle.
2022  */
2023 static inline bool hlock_conflict(struct lock_list *entry, void *data)
2024 {
2025 	struct held_lock *hlock = (struct held_lock *)data;
2026 
2027 	return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
2028 	       (hlock->read == 0 || /* B -> A is -(E*)-> */
2029 		!entry->only_xr); /* A -> .. -> B is -(*N)-> */
2030 }
2031 
2032 static noinline void print_circular_bug(struct lock_list *this,
2033 					struct lock_list *target,
2034 					struct held_lock *check_src,
2035 					struct held_lock *check_tgt)
2036 {
2037 	struct task_struct *curr = current;
2038 	struct lock_list *parent;
2039 	struct lock_list *first_parent;
2040 	int depth;
2041 
2042 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2043 		return;
2044 
2045 	this->trace = save_trace();
2046 	if (!this->trace)
2047 		return;
2048 
2049 	depth = get_lock_depth(target);
2050 
2051 	print_circular_bug_header(target, depth, check_src, check_tgt);
2052 
2053 	parent = get_lock_parent(target);
2054 	first_parent = parent;
2055 
2056 	while (parent) {
2057 		print_circular_bug_entry(parent, --depth);
2058 		parent = get_lock_parent(parent);
2059 	}
2060 
2061 	printk("\nother info that might help us debug this:\n\n");
2062 	print_circular_lock_scenario(check_src, check_tgt,
2063 				     first_parent);
2064 
2065 	lockdep_print_held_locks(curr);
2066 
2067 	printk("\nstack backtrace:\n");
2068 	dump_stack();
2069 }
2070 
2071 static noinline void print_bfs_bug(int ret)
2072 {
2073 	if (!debug_locks_off_graph_unlock())
2074 		return;
2075 
2076 	/*
2077 	 * Breadth-first-search failed, graph got corrupted?
2078 */ 2079 WARN(1, "lockdep bfs error:%d\n", ret); 2080 } 2081 2082 static bool noop_count(struct lock_list *entry, void *data) 2083 { 2084 (*(unsigned long *)data)++; 2085 return false; 2086 } 2087 2088 static unsigned long __lockdep_count_forward_deps(struct lock_list *this) 2089 { 2090 unsigned long count = 0; 2091 struct lock_list *target_entry; 2092 2093 __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry); 2094 2095 return count; 2096 } 2097 unsigned long lockdep_count_forward_deps(struct lock_class *class) 2098 { 2099 unsigned long ret, flags; 2100 struct lock_list this; 2101 2102 __bfs_init_root(&this, class); 2103 2104 raw_local_irq_save(flags); 2105 lockdep_lock(); 2106 ret = __lockdep_count_forward_deps(&this); 2107 lockdep_unlock(); 2108 raw_local_irq_restore(flags); 2109 2110 return ret; 2111 } 2112 2113 static unsigned long __lockdep_count_backward_deps(struct lock_list *this) 2114 { 2115 unsigned long count = 0; 2116 struct lock_list *target_entry; 2117 2118 __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry); 2119 2120 return count; 2121 } 2122 2123 unsigned long lockdep_count_backward_deps(struct lock_class *class) 2124 { 2125 unsigned long ret, flags; 2126 struct lock_list this; 2127 2128 __bfs_init_root(&this, class); 2129 2130 raw_local_irq_save(flags); 2131 lockdep_lock(); 2132 ret = __lockdep_count_backward_deps(&this); 2133 lockdep_unlock(); 2134 raw_local_irq_restore(flags); 2135 2136 return ret; 2137 } 2138 2139 /* 2140 * Check that the dependency graph starting at <src> can lead to 2141 * <target> or not. 2142 */ 2143 static noinline enum bfs_result 2144 check_path(struct held_lock *target, struct lock_list *src_entry, 2145 bool (*match)(struct lock_list *entry, void *data), 2146 bool (*skip)(struct lock_list *entry, void *data), 2147 struct lock_list **target_entry) 2148 { 2149 enum bfs_result ret; 2150 2151 ret = __bfs_forwards(src_entry, target, match, skip, target_entry); 2152 2153 if (unlikely(bfs_error(ret))) 2154 print_bfs_bug(ret); 2155 2156 return ret; 2157 } 2158 2159 static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *); 2160 2161 /* 2162 * Prove that the dependency graph starting at <src> can not 2163 * lead to <target>. If it can, there is a circle when adding 2164 * <target> -> <src> dependency. 2165 * 2166 * Print an error and return BFS_RMATCH if it does. 2167 */ 2168 static noinline enum bfs_result 2169 check_noncircular(struct held_lock *src, struct held_lock *target, 2170 struct lock_trace **const trace) 2171 { 2172 enum bfs_result ret; 2173 struct lock_list *target_entry; 2174 struct lock_list src_entry; 2175 2176 bfs_init_root(&src_entry, src); 2177 2178 debug_atomic_inc(nr_cyclic_checks); 2179 2180 ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry); 2181 2182 if (unlikely(ret == BFS_RMATCH)) { 2183 if (!*trace) { 2184 /* 2185 * If save_trace fails here, the printing might 2186 * trigger a WARN but because of the !nr_entries it 2187 * should not do bad things. 
2188 			 */
2189 			*trace = save_trace();
2190 		}
2191 
2192 		if (src->class_idx == target->class_idx)
2193 			print_deadlock_bug(current, src, target);
2194 		else
2195 			print_circular_bug(&src_entry, target_entry, src, target);
2196 	}
2197 
2198 	return ret;
2199 }
2200 
2201 #ifdef CONFIG_TRACE_IRQFLAGS
2202 
2203 /*
2204  * Forwards and backwards subgraph searching, for the purposes of
2205  * proving that two subgraphs can be connected by a new dependency
2206  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
2207  *
2208  * An irq safe->unsafe deadlock happens with the following conditions:
2209  *
2210  * 1) We have a strong dependency path A -> ... -> B
2211  *
2212  * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
2213  *    irq can create a new dependency B -> A (consider the case that a holder
2214  *    of B gets interrupted by an irq whose handler will try to acquire A).
2215  *
2216  * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
2217  *    strong circle:
2218  *
2219  *      For the usage bits of B:
2220  *        a) if A -> B is -(*N)->, then B -> A could be any type, so any
2221  *           ENABLED_IRQ usage suffices.
2222  *        b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
2223  *           non-read ENABLED_IRQ usage suffices.
2224  *
2225  *      For the usage bits of A:
2226  *        c) if A -> B is -(E*)->, then B -> A could be any type, so any
2227  *           USED_IN_IRQ usage suffices.
2228  *        d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
2229  *           non-read USED_IN_IRQ usage suffices.
2230  */
2231 
2232 /*
2233  * There is a strong dependency path in the dependency graph: A -> B, and now
2234  * we need to decide which usage bit of A should be accumulated to detect
2235  * safe->unsafe bugs.
2236  *
2237  * Note that usage_accumulate() is used in backwards search, so ->only_xr
2238  * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2239  *
2240  * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
2241  * path, any usage of A should be considered. Otherwise, we should only
2242  * consider the non-_READ usages.
2243  */
2244 static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2245 {
2246 	if (!entry->only_xr)
2247 		*(unsigned long *)mask |= entry->class->usage_mask;
2248 	else /* Mask out _READ usage bits */
2249 		*(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2250 
2251 	return false;
2252 }
2253 
2254 /*
2255  * There is a strong dependency path in the dependency graph: A -> B, and now
2256  * we need to decide which usage bit of B conflicts with the usage bits of A,
2257  * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
2258  *
2259  * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
2260  * path, any usage of B should be considered. Otherwise, we should only
2261  * consider the non-_READ usages.
2262  */
2263 static inline bool usage_match(struct lock_list *entry, void *mask)
2264 {
2265 	if (!entry->only_xr)
2266 		return !!(entry->class->usage_mask & *(unsigned long *)mask);
2267 	else /* Mask out _READ usage bits */
2268 		return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2269 }
2270 
2271 static inline bool usage_skip(struct lock_list *entry, void *mask)
2272 {
2273 	/*
2274 	 * Skip local_lock() for irq inversion detection.
2275 	 *
2276 	 * For !RT, local_lock() is not a real lock, so it won't carry any
2277 	 * dependency.
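	 * (For illustration: on !RT, local_lock() amounts to disabling
	 * preemption or interrupts plus the lockdep annotation; it never
	 * blocks, so it cannot contribute a wait dependency.)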
2278 	 *
2279 	 * For RT, an irq inversion happens when we have lock A and B, and on
2280 	 * some CPU we can have:
2281 	 *
2282 	 *	lock(A);
2283 	 *	<interrupted>
2284 	 *	  lock(B);
2285 	 *
2286 	 * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
2287 	 *
2288 	 * Now we prove local_lock() cannot exist in that dependency. First we
2289 	 * have the observation for any lock chain L1 -> ... -> Ln, for any
2290 	 * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
2291 	 * wait context check will complain. And since B is not a sleep lock,
2292 	 * B.inner_wait_type is at most 2, while the inner_wait_type of
2293 	 * local_lock() is 3, which is greater than 2; therefore there is no
2294 	 * way local_lock() can exist in the dependency B -> ... -> A.
2295 	 *
2296 	 * As a result, we will skip local_lock(), when we search for irq
2297 	 * inversion bugs.
2298 	 */
2299 	if (entry->class->lock_type == LD_LOCK_PERCPU) {
2300 		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2301 			return false;
2302 
2303 		return true;
2304 	}
2305 
2306 	return false;
2307 }
2308 
2309 /*
2310  * Find a node in the forwards-direction dependency sub-graph starting
2311  * at @root->class whose usage mask matches @usage_mask.
2312  *
2313  * Return BFS_RMATCH if such a node exists in the subgraph, and put that node
2314  * into *@target_entry.
2315  */
2316 static enum bfs_result
2317 find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2318 		    struct lock_list **target_entry)
2319 {
2320 	enum bfs_result result;
2321 
2322 	debug_atomic_inc(nr_find_usage_forwards_checks);
2323 
2324 	result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2325 
2326 	return result;
2327 }
2328 
2329 /*
2330  * Find a node in the backwards-direction dependency sub-graph starting
2331  * at @root->class whose usage mask matches @usage_mask.
2332  */
2333 static enum bfs_result
2334 find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2335 		     struct lock_list **target_entry)
2336 {
2337 	enum bfs_result result;
2338 
2339 	debug_atomic_inc(nr_find_usage_backwards_checks);
2340 
2341 	result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2342 
2343 	return result;
2344 }
2345 
2346 static void print_lock_class_header(struct lock_class *class, int depth)
2347 {
2348 	int bit;
2349 
2350 	printk("%*s->", depth, "");
2351 	print_lock_name(NULL, class);
2352 #ifdef CONFIG_DEBUG_LOCKDEP
2353 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2354 #endif
2355 	printk(KERN_CONT " {\n");
2356 
2357 	for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2358 		if (class->usage_mask & (1 << bit)) {
2359 			int len = depth;
2360 
2361 			len += printk("%*s   %s", depth, "", usage_str[bit]);
2362 			len += printk(KERN_CONT " at:\n");
2363 			print_lock_trace(class->usage_traces[bit], len);
2364 		}
2365 	}
2366 	printk("%*s }\n", depth, "");
2367 
2368 	printk("%*s ... key      at: [<%px>] %pS\n",
2369 		depth, "", class->key, class->key);
2370 }
2371 
2372 /*
2373  * Dependency path printing:
2374  *
2375  * After BFS we get a lock dependency path (linked via ->parent of lock_list),
2376  * printing out each lock in the dependency path helps us understand how
2377  * the deadlock could happen. Here are some details about dependency path
2378  * printing:
2379  *
2380  * 1) A lock_list can be either forwards or backwards for a lock dependency,
2381  *    for a lock dependency A -> B, there are two lock_lists:
2382  *
2383  *    a) lock_list in the ->locks_after list of A, whose ->class is B and
2384  *       ->links_to is A. In this case, we can say the lock_list is
2385  *       "A -> B" (forwards case).
2386  *
2387  *    b) lock_list in the ->locks_before list of B, whose ->class is A
2388  *       and ->links_to is B. In this case, we can say the lock_list is
2389  *       "B <- A" (backwards case).
2390  *
2391  * The ->trace of both a) and b) point to the call trace where B was
2392  * acquired with A held.
2393  *
2394  * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
2395  *    represent a certain lock dependency, it only provides an initial entry
2396  *    for BFS. For example, BFS may introduce a "helper" lock_list whose
2397  *    ->class is A, as a result BFS will search all dependencies starting with
2398  *    A, e.g. A -> B or A -> C.
2399  *
2400  * The notation of a forwards helper lock_list is like "-> A", which means
2401  * we should search the forwards dependencies starting with "A", e.g A -> B
2402  * or A -> C.
2403  *
2404  * The notation of a backwards helper lock_list is like "<- B", which means
2405  * we should search the backwards dependencies ending with "B", e.g.
2406  * B <- A or B <- C.
2407  */
2408 
2409 /*
2410  * printk the shortest lock dependencies from @root to @leaf in reverse order.
2411  *
2412  * We have a lock dependency path as follows:
2413  *
2414  *    @root                                                                 @leaf
2415  *      |                                                                     |
2416  *      V                                                                     V
2417  *	          ->parent                                   ->parent
2418  * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
2419  * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
2420  *
2421  * , so it's natural that we start from @leaf and print every ->class and
2422  * ->trace until we reach the @root.
2423  */
2424 static void __used
2425 print_shortest_lock_dependencies(struct lock_list *leaf,
2426 				 struct lock_list *root)
2427 {
2428 	struct lock_list *entry = leaf;
2429 	int depth;
2430 
2431 	/* compute depth from generated tree by BFS */
2432 	depth = get_lock_depth(leaf);
2433 
2434 	do {
2435 		print_lock_class_header(entry->class, depth);
2436 		printk("%*s ... acquired at:\n", depth, "");
2437 		print_lock_trace(entry->trace, 2);
2438 		printk("\n");
2439 
2440 		if (depth == 0 && (entry != root)) {
2441 			printk("lockdep:%s bad path found in chain graph\n", __func__);
2442 			break;
2443 		}
2444 
2445 		entry = get_lock_parent(entry);
2446 		depth--;
2447 	} while (entry && (depth >= 0));
2448 }
2449 
2450 /*
2451  * printk the shortest lock dependencies from @leaf to @root.
2452  *
2453  * We have a lock dependency path (from a backwards search) as follows:
2454  *
2455  *    @leaf                                                                 @root
2456  *      |                                                                     |
2457  *      V                                                                     V
2458  *	          ->parent                                   ->parent
2459  * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
2460  * |  L2 <- L1 |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
2461  *
2462  * , so when we iterate from @leaf to @root, we actually print the lock
2463  * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
2464  *
2465  * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
2466  * ->trace of L2 <- L1 is the call trace of L2. In fact we don't have the call
2467  * trace of L1 in the dependency path, which is alright, because most of the
2468  * time we can figure out where L1 is held from the call trace of L2.
2469 */ 2470 static void __used 2471 print_shortest_lock_dependencies_backwards(struct lock_list *leaf, 2472 struct lock_list *root) 2473 { 2474 struct lock_list *entry = leaf; 2475 const struct lock_trace *trace = NULL; 2476 int depth; 2477 2478 /*compute depth from generated tree by BFS*/ 2479 depth = get_lock_depth(leaf); 2480 2481 do { 2482 print_lock_class_header(entry->class, depth); 2483 if (trace) { 2484 printk("%*s ... acquired at:\n", depth, ""); 2485 print_lock_trace(trace, 2); 2486 printk("\n"); 2487 } 2488 2489 /* 2490 * Record the pointer to the trace for the next lock_list 2491 * entry, see the comments for the function. 2492 */ 2493 trace = entry->trace; 2494 2495 if (depth == 0 && (entry != root)) { 2496 printk("lockdep:%s bad path found in chain graph\n", __func__); 2497 break; 2498 } 2499 2500 entry = get_lock_parent(entry); 2501 depth--; 2502 } while (entry && (depth >= 0)); 2503 } 2504 2505 static void 2506 print_irq_lock_scenario(struct lock_list *safe_entry, 2507 struct lock_list *unsafe_entry, 2508 struct lock_class *prev_class, 2509 struct lock_class *next_class) 2510 { 2511 struct lock_class *safe_class = safe_entry->class; 2512 struct lock_class *unsafe_class = unsafe_entry->class; 2513 struct lock_class *middle_class = prev_class; 2514 2515 if (middle_class == safe_class) 2516 middle_class = next_class; 2517 2518 /* 2519 * A direct locking problem where unsafe_class lock is taken 2520 * directly by safe_class lock, then all we need to show 2521 * is the deadlock scenario, as it is obvious that the 2522 * unsafe lock is taken under the safe lock. 2523 * 2524 * But if there is a chain instead, where the safe lock takes 2525 * an intermediate lock (middle_class) where this lock is 2526 * not the same as the safe lock, then the lock chain is 2527 * used to describe the problem. Otherwise we would need 2528 * to show a different CPU case for each link in the chain 2529 * from the safe_class lock to the unsafe_class lock. 
2530 */ 2531 if (middle_class != unsafe_class) { 2532 printk("Chain exists of:\n "); 2533 __print_lock_name(NULL, safe_class); 2534 printk(KERN_CONT " --> "); 2535 __print_lock_name(NULL, middle_class); 2536 printk(KERN_CONT " --> "); 2537 __print_lock_name(NULL, unsafe_class); 2538 printk(KERN_CONT "\n\n"); 2539 } 2540 2541 printk(" Possible interrupt unsafe locking scenario:\n\n"); 2542 printk(" CPU0 CPU1\n"); 2543 printk(" ---- ----\n"); 2544 printk(" lock("); 2545 __print_lock_name(NULL, unsafe_class); 2546 printk(KERN_CONT ");\n"); 2547 printk(" local_irq_disable();\n"); 2548 printk(" lock("); 2549 __print_lock_name(NULL, safe_class); 2550 printk(KERN_CONT ");\n"); 2551 printk(" lock("); 2552 __print_lock_name(NULL, middle_class); 2553 printk(KERN_CONT ");\n"); 2554 printk(" <Interrupt>\n"); 2555 printk(" lock("); 2556 __print_lock_name(NULL, safe_class); 2557 printk(KERN_CONT ");\n"); 2558 printk("\n *** DEADLOCK ***\n\n"); 2559 } 2560 2561 static void 2562 print_bad_irq_dependency(struct task_struct *curr, 2563 struct lock_list *prev_root, 2564 struct lock_list *next_root, 2565 struct lock_list *backwards_entry, 2566 struct lock_list *forwards_entry, 2567 struct held_lock *prev, 2568 struct held_lock *next, 2569 enum lock_usage_bit bit1, 2570 enum lock_usage_bit bit2, 2571 const char *irqclass) 2572 { 2573 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2574 return; 2575 2576 pr_warn("\n"); 2577 pr_warn("=====================================================\n"); 2578 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n", 2579 irqclass, irqclass); 2580 print_kernel_ident(); 2581 pr_warn("-----------------------------------------------------\n"); 2582 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 2583 curr->comm, task_pid_nr(curr), 2584 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, 2585 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 2586 lockdep_hardirqs_enabled(), 2587 curr->softirqs_enabled); 2588 print_lock(next); 2589 2590 pr_warn("\nand this task is already holding:\n"); 2591 print_lock(prev); 2592 pr_warn("which would create a new lock dependency:\n"); 2593 print_lock_name(prev, hlock_class(prev)); 2594 pr_cont(" ->"); 2595 print_lock_name(next, hlock_class(next)); 2596 pr_cont("\n"); 2597 2598 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", 2599 irqclass); 2600 print_lock_name(NULL, backwards_entry->class); 2601 pr_warn("\n... which became %s-irq-safe at:\n", irqclass); 2602 2603 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1); 2604 2605 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); 2606 print_lock_name(NULL, forwards_entry->class); 2607 pr_warn("\n... 
which became %s-irq-unsafe at:\n", irqclass);
2608 	pr_warn("...");
2609 
2610 	print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2611 
2612 	pr_warn("\nother info that might help us debug this:\n\n");
2613 	print_irq_lock_scenario(backwards_entry, forwards_entry,
2614 				hlock_class(prev), hlock_class(next));
2615 
2616 	lockdep_print_held_locks(curr);
2617 
2618 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2619 	print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2620 
2621 	pr_warn("\nthe dependencies between the lock to be acquired");
2622 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2623 	next_root->trace = save_trace();
2624 	if (!next_root->trace)
2625 		return;
2626 	print_shortest_lock_dependencies(forwards_entry, next_root);
2627 
2628 	pr_warn("\nstack backtrace:\n");
2629 	dump_stack();
2630 }
2631 
2632 static const char *state_names[] = {
2633 #define LOCKDEP_STATE(__STATE) \
2634 	__stringify(__STATE),
2635 #include "lockdep_states.h"
2636 #undef LOCKDEP_STATE
2637 };
2638 
2639 static const char *state_rnames[] = {
2640 #define LOCKDEP_STATE(__STATE) \
2641 	__stringify(__STATE)"-READ",
2642 #include "lockdep_states.h"
2643 #undef LOCKDEP_STATE
2644 };
2645 
2646 static inline const char *state_name(enum lock_usage_bit bit)
2647 {
2648 	if (bit & LOCK_USAGE_READ_MASK)
2649 		return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2650 	else
2651 		return state_names[bit >> LOCK_USAGE_DIR_MASK];
2652 }
2653 
2654 /*
2655  * The bit number is encoded like:
2656  *
2657  *  bit0: 0 exclusive, 1 read lock
2658  *  bit1: 0 used in irq, 1 irq enabled
2659  *  bit2-n: state
2660  */
2661 static int exclusive_bit(int new_bit)
2662 {
2663 	int state = new_bit & LOCK_USAGE_STATE_MASK;
2664 	int dir = new_bit & LOCK_USAGE_DIR_MASK;
2665 
2666 	/*
2667 	 * keep state, bit flip the direction and strip read.
2668 	 */
2669 	return state | (dir ^ LOCK_USAGE_DIR_MASK);
2670 }
2671 
2672 /*
2673  * Observe that when given a bitmask where each bitnr is encoded as above, a
2674  * right shift of the mask decrements each encoded bit number by one and,
2675  * conversely, a left shift increments each bit number by one.
2676  *
2677  * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
2678  * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2679  * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2680  *
2681  * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2682  *
2683  * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2684  * all bits set) and recompose with bitnr1 flipped.
2685  */
2686 static unsigned long invert_dir_mask(unsigned long mask)
2687 {
2688 	unsigned long excl = 0;
2689 
2690 	/* Invert dir */
2691 	excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2692 	excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2693 
2694 	return excl;
2695 }
2696 
2697 /*
2698  * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
2699  * usage may cause deadlock too, for example:
2700  *
2701  * P1                          P2
2702  * <irq disabled>
2703  * write_lock(l1);             <irq enabled>
2704  *                             read_lock(l2);
2705  * write_lock(l2);
2706  *                             <in irq>
2707  *                             read_lock(l1);
2708  *
2709  * , in the above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and
2710  * l2 will be marked as LOCK_ENABLED_IRQ_HARDIRQ_READ, and this is a possible
2711  * deadlock.
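 *
 * (The scenario above really deadlocks: P1 write-holds l1 and waits for
 * write_lock(l2), which cannot proceed while P2 read-holds l2; meanwhile
 * the irq hitting P2 waits for read_lock(l1), which is blocked by P1's
 * write hold of l1.)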
2712  *
2713  * In fact, all of the following cases may cause deadlocks:
2714  *
2715  *	 LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
2716  *	 LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
2717  *	 LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
2718  *	 LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
2719  *
2720  * As a result, to calculate the "exclusive mask", first we invert the
2721  * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits
2722  * with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*);
2723  * 2) for all bits with bitnr0 cleared (LOCK_*), add those with bitnr0 set (LOCK_*_READ).
2724  */
2725 static unsigned long exclusive_mask(unsigned long mask)
2726 {
2727 	unsigned long excl = invert_dir_mask(mask);
2728 
2729 	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2730 	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2731 
2732 	return excl;
2733 }
2734 
2735 /*
2736  * Retrieve the _possible_ original mask to which @mask is
2737  * exclusive. I.e. this is the opposite of exclusive_mask().
2738  * Note that 2 possible original bits can match an exclusive
2739  * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2740  * cleared. So both are returned for each exclusive bit.
2741  */
2742 static unsigned long original_mask(unsigned long mask)
2743 {
2744 	unsigned long excl = invert_dir_mask(mask);
2745 
2746 	/* Include read in existing usages */
2747 	excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2748 	excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2749 
2750 	return excl;
2751 }
2752 
2753 /*
2754  * Find the first pair of bit match between an original
2755  * usage mask and an exclusive usage mask.
2756  */
2757 static int find_exclusive_match(unsigned long mask,
2758 				unsigned long excl_mask,
2759 				enum lock_usage_bit *bitp,
2760 				enum lock_usage_bit *excl_bitp)
2761 {
2762 	int bit, excl, excl_read;
2763 
2764 	for_each_set_bit(bit, &mask, LOCK_USED) {
2765 		/*
2766 		 * exclusive_bit() strips the read bit, however,
2767 		 * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
2768 		 * to search excl | LOCK_USAGE_READ_MASK as well.
2769 		 */
2770 		excl = exclusive_bit(bit);
2771 		excl_read = excl | LOCK_USAGE_READ_MASK;
2772 		if (excl_mask & lock_flag(excl)) {
2773 			*bitp = bit;
2774 			*excl_bitp = excl;
2775 			return 0;
2776 		} else if (excl_mask & lock_flag(excl_read)) {
2777 			*bitp = bit;
2778 			*excl_bitp = excl_read;
2779 			return 0;
2780 		}
2781 	}
2782 	return -1;
2783 }
2784 
2785 /*
2786  * Prove that the new dependency does not connect a hardirq-safe(-read)
2787  * lock with a hardirq-unsafe lock - to achieve this we search
2788  * the backwards-subgraph starting at <prev>, and the
2789  * forwards-subgraph starting at <next>:
2790  */
2791 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2792 			   struct held_lock *next)
2793 {
2794 	unsigned long usage_mask = 0, forward_mask, backward_mask;
2795 	enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2796 	struct lock_list *target_entry1;
2797 	struct lock_list *target_entry;
2798 	struct lock_list this, that;
2799 	enum bfs_result ret;
2800 
2801 	/*
2802 	 * Step 1: gather all hard/soft IRQ usages backwards, into an
2803 	 * accumulated usage mask.
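	 * For example (hypothetical classes): if some lock L reachable
	 * backwards from @prev was ever acquired in hardirq context,
	 * LOCKF_USED_IN_HARDIRQ ends up set in usage_mask, and step 2
	 * below will then search forwards from @next for a lock acquired
	 * with hardirqs enabled, i.e. one carrying the exclusive
	 * (hardirq-unsafe) bits.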
2804 	 */
2805 	bfs_init_rootb(&this, prev);
2806 
2807 	ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2808 	if (bfs_error(ret)) {
2809 		print_bfs_bug(ret);
2810 		return 0;
2811 	}
2812 
2813 	usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2814 	if (!usage_mask)
2815 		return 1;
2816 
2817 	/*
2818 	 * Step 2: find exclusive uses forward that match the previous
2819 	 * backward accumulated mask.
2820 	 */
2821 	forward_mask = exclusive_mask(usage_mask);
2822 
2823 	bfs_init_root(&that, next);
2824 
2825 	ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2826 	if (bfs_error(ret)) {
2827 		print_bfs_bug(ret);
2828 		return 0;
2829 	}
2830 	if (ret == BFS_RNOMATCH)
2831 		return 1;
2832 
2833 	/*
2834 	 * Step 3: we found a bad match! Now retrieve a lock from the backward
2835 	 * list whose usage mask matches the exclusive usage mask from the
2836 	 * lock found on the forward list.
2837 	 *
2838 	 * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
2839 	 * the following case:
2840 	 *
2841 	 * When trying to add A -> B to the graph, we find that there is a
2842 	 * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M,
2843 	 * that B -> ... -> M. However M is **softirq-safe**; if we used the
2844 	 * exact inverted bits of M's usage_mask, we would find another lock N
2845 	 * that is **softirq-unsafe** and N -> ... -> A, however N -> .. -> M
2846 	 * will not cause an inversion deadlock.
2847 	 */
2848 	backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2849 
2850 	ret = find_usage_backwards(&this, backward_mask, &target_entry);
2851 	if (bfs_error(ret)) {
2852 		print_bfs_bug(ret);
2853 		return 0;
2854 	}
2855 	if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2856 		return 1;
2857 
2858 	/*
2859 	 * Step 4: narrow down to a pair of incompatible usage bits
2860 	 * and report it.
2861 	 */
2862 	ret = find_exclusive_match(target_entry->class->usage_mask,
2863 				   target_entry1->class->usage_mask,
2864 				   &backward_bit, &forward_bit);
2865 	if (DEBUG_LOCKS_WARN_ON(ret == -1))
2866 		return 1;
2867 
2868 	print_bad_irq_dependency(curr, &this, &that,
2869 				 target_entry, target_entry1,
2870 				 prev, next,
2871 				 backward_bit, forward_bit,
2872 				 state_name(backward_bit));
2873 
2874 	return 0;
2875 }
2876 
2877 #else
2878 
2879 static inline int check_irq_usage(struct task_struct *curr,
2880 				  struct held_lock *prev, struct held_lock *next)
2881 {
2882 	return 1;
2883 }
2884 
2885 static inline bool usage_skip(struct lock_list *entry, void *mask)
2886 {
2887 	return false;
2888 }
2889 
2890 #endif /* CONFIG_TRACE_IRQFLAGS */
2891 
2892 #ifdef CONFIG_LOCKDEP_SMALL
2893 /*
2894  * Check whether the dependency graph starting at <src> can lead to
2895  * <target>. If it can, the <src> -> <target> dependency is already
2896  * in the graph.
2897  *
2898  * Return BFS_RMATCH if it does, BFS_RNOMATCH if it does not, and BFS_E* if
2899  * any error appears in the BFS search.
2900  */
2901 static noinline enum bfs_result
2902 check_redundant(struct held_lock *src, struct held_lock *target)
2903 {
2904 	enum bfs_result ret;
2905 	struct lock_list *target_entry;
2906 	struct lock_list src_entry;
2907 
2908 	bfs_init_root(&src_entry, src);
2909 	/*
2910 	 * Special setup for check_redundant().
2911 	 *
2912 	 * To report redundancy, we need to find a strong dependency path that
2913 	 * is equal to or stronger than <src> -> <target>. So if <src> is E,
2914 	 * we need to let __bfs() only search for a path starting at a -(E*)->;
2915 	 * we achieve this by setting the initial node's ->only_xr to true in
2916 	 * that case.
And if <prev> is S, we set initial ->only_xr to false 2917 * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant. 2918 */ 2919 src_entry.only_xr = src->read == 0; 2920 2921 debug_atomic_inc(nr_redundant_checks); 2922 2923 /* 2924 * Note: we skip local_lock() for redundant check, because as the 2925 * comment in usage_skip(), A -> local_lock() -> B and A -> B are not 2926 * the same. 2927 */ 2928 ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry); 2929 2930 if (ret == BFS_RMATCH) 2931 debug_atomic_inc(nr_redundant); 2932 2933 return ret; 2934 } 2935 2936 #else 2937 2938 static inline enum bfs_result 2939 check_redundant(struct held_lock *src, struct held_lock *target) 2940 { 2941 return BFS_RNOMATCH; 2942 } 2943 2944 #endif 2945 2946 static void inc_chains(int irq_context) 2947 { 2948 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) 2949 nr_hardirq_chains++; 2950 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) 2951 nr_softirq_chains++; 2952 else 2953 nr_process_chains++; 2954 } 2955 2956 static void dec_chains(int irq_context) 2957 { 2958 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) 2959 nr_hardirq_chains--; 2960 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) 2961 nr_softirq_chains--; 2962 else 2963 nr_process_chains--; 2964 } 2965 2966 static void 2967 print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) 2968 { 2969 struct lock_class *next = hlock_class(nxt); 2970 struct lock_class *prev = hlock_class(prv); 2971 2972 printk(" Possible unsafe locking scenario:\n\n"); 2973 printk(" CPU0\n"); 2974 printk(" ----\n"); 2975 printk(" lock("); 2976 __print_lock_name(prv, prev); 2977 printk(KERN_CONT ");\n"); 2978 printk(" lock("); 2979 __print_lock_name(nxt, next); 2980 printk(KERN_CONT ");\n"); 2981 printk("\n *** DEADLOCK ***\n\n"); 2982 printk(" May be due to missing lock nesting notation\n\n"); 2983 } 2984 2985 static void 2986 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 2987 struct held_lock *next) 2988 { 2989 struct lock_class *class = hlock_class(prev); 2990 2991 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2992 return; 2993 2994 pr_warn("\n"); 2995 pr_warn("============================================\n"); 2996 pr_warn("WARNING: possible recursive locking detected\n"); 2997 print_kernel_ident(); 2998 pr_warn("--------------------------------------------\n"); 2999 pr_warn("%s/%d is trying to acquire lock:\n", 3000 curr->comm, task_pid_nr(curr)); 3001 print_lock(next); 3002 pr_warn("\nbut task is already holding lock:\n"); 3003 print_lock(prev); 3004 3005 if (class->cmp_fn) { 3006 pr_warn("and the lock comparison function returns %i:\n", 3007 class->cmp_fn(prev->instance, next->instance)); 3008 } 3009 3010 pr_warn("\nother info that might help us debug this:\n"); 3011 print_deadlock_scenario(next, prev); 3012 lockdep_print_held_locks(curr); 3013 3014 pr_warn("\nstack backtrace:\n"); 3015 dump_stack(); 3016 } 3017 3018 /* 3019 * Check whether we are holding such a class already. 3020 * 3021 * (Note that this has to be done separately, because the graph cannot 3022 * detect such classes of deadlocks.) 3023 * 3024 * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same 3025 * lock class is held but nest_lock is also held, i.e. we rely on the 3026 * nest_lock to avoid the deadlock. 
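 *
 * For example, read_lock(A); read_lock(A) on the same task is allowed
 * (read recursion), and taking two locks of one class while holding
 * their nest_lock (e.g. via mutex_lock_nest_lock()) returns 2 rather
 * than reporting a deadlock.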
3027 */ 3028 static int 3029 check_deadlock(struct task_struct *curr, struct held_lock *next) 3030 { 3031 struct lock_class *class; 3032 struct held_lock *prev; 3033 struct held_lock *nest = NULL; 3034 int i; 3035 3036 for (i = 0; i < curr->lockdep_depth; i++) { 3037 prev = curr->held_locks + i; 3038 3039 if (prev->instance == next->nest_lock) 3040 nest = prev; 3041 3042 if (hlock_class(prev) != hlock_class(next)) 3043 continue; 3044 3045 /* 3046 * Allow read-after-read recursion of the same 3047 * lock class (i.e. read_lock(lock)+read_lock(lock)): 3048 */ 3049 if ((next->read == 2) && prev->read) 3050 continue; 3051 3052 class = hlock_class(prev); 3053 3054 if (class->cmp_fn && 3055 class->cmp_fn(prev->instance, next->instance) < 0) 3056 continue; 3057 3058 /* 3059 * We're holding the nest_lock, which serializes this lock's 3060 * nesting behaviour. 3061 */ 3062 if (nest) 3063 return 2; 3064 3065 print_deadlock_bug(curr, prev, next); 3066 return 0; 3067 } 3068 return 1; 3069 } 3070 3071 /* 3072 * There was a chain-cache miss, and we are about to add a new dependency 3073 * to a previous lock. We validate the following rules: 3074 * 3075 * - would the adding of the <prev> -> <next> dependency create a 3076 * circular dependency in the graph? [== circular deadlock] 3077 * 3078 * - does the new prev->next dependency connect any hardirq-safe lock 3079 * (in the full backwards-subgraph starting at <prev>) with any 3080 * hardirq-unsafe lock (in the full forwards-subgraph starting at 3081 * <next>)? [== illegal lock inversion with hardirq contexts] 3082 * 3083 * - does the new prev->next dependency connect any softirq-safe lock 3084 * (in the full backwards-subgraph starting at <prev>) with any 3085 * softirq-unsafe lock (in the full forwards-subgraph starting at 3086 * <next>)? [== illegal lock inversion with softirq contexts] 3087 * 3088 * any of these scenarios could lead to a deadlock. 3089 * 3090 * Then if all the validations pass, we add the forwards and backwards 3091 * dependency. 3092 */ 3093 static int 3094 check_prev_add(struct task_struct *curr, struct held_lock *prev, 3095 struct held_lock *next, u16 distance, 3096 struct lock_trace **const trace) 3097 { 3098 struct lock_list *entry; 3099 enum bfs_result ret; 3100 3101 if (!hlock_class(prev)->key || !hlock_class(next)->key) { 3102 /* 3103 * The warning statements below may trigger a use-after-free 3104 * of the class name. It is better to trigger a use-after free 3105 * and to have the class name most of the time instead of not 3106 * having the class name available. 3107 */ 3108 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, 3109 "Detected use-after-free of lock class %px/%s\n", 3110 hlock_class(prev), 3111 hlock_class(prev)->name); 3112 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, 3113 "Detected use-after-free of lock class %px/%s\n", 3114 hlock_class(next), 3115 hlock_class(next)->name); 3116 return 2; 3117 } 3118 3119 if (prev->class_idx == next->class_idx) { 3120 struct lock_class *class = hlock_class(prev); 3121 3122 if (class->cmp_fn && 3123 class->cmp_fn(prev->instance, next->instance) < 0) 3124 return 2; 3125 } 3126 3127 /* 3128 * Prove that the new <prev> -> <next> dependency would not 3129 * create a circular dependency in the graph. (We do this by 3130 * a breadth-first search into the graph starting at <next>, 3131 * and check whether we can reach <prev>.) 
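	 * (Concretely: if L1 -> L2 is already in the graph and we are
	 * about to add L2 -> L1, i.e. <prev> == L2 and <next> == L1, the
	 * BFS starting at L1 reaches L2 and the circular dependency is
	 * reported.)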
3132 * 3133 * The search is limited by the size of the circular queue (i.e., 3134 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes 3135 * in the graph whose neighbours are to be checked. 3136 */ 3137 ret = check_noncircular(next, prev, trace); 3138 if (unlikely(bfs_error(ret) || ret == BFS_RMATCH)) 3139 return 0; 3140 3141 if (!check_irq_usage(curr, prev, next)) 3142 return 0; 3143 3144 /* 3145 * Is the <prev> -> <next> dependency already present? 3146 * 3147 * (this may occur even though this is a new chain: consider 3148 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 3149 * chains - the second one will be new, but L1 already has 3150 * L2 added to its dependency list, due to the first chain.) 3151 */ 3152 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { 3153 if (entry->class == hlock_class(next)) { 3154 if (distance == 1) 3155 entry->distance = 1; 3156 entry->dep |= calc_dep(prev, next); 3157 3158 /* 3159 * Also, update the reverse dependency in @next's 3160 * ->locks_before list. 3161 * 3162 * Here we reuse @entry as the cursor, which is fine 3163 * because we won't go to the next iteration of the 3164 * outer loop: 3165 * 3166 * For normal cases, we return in the inner loop. 3167 * 3168 * If we fail to return, we have inconsistency, i.e. 3169 * <prev>::locks_after contains <next> while 3170 * <next>::locks_before doesn't contain <prev>. In 3171 * that case, we return after the inner and indicate 3172 * something is wrong. 3173 */ 3174 list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) { 3175 if (entry->class == hlock_class(prev)) { 3176 if (distance == 1) 3177 entry->distance = 1; 3178 entry->dep |= calc_depb(prev, next); 3179 return 1; 3180 } 3181 } 3182 3183 /* <prev> is not found in <next>::locks_before */ 3184 return 0; 3185 } 3186 } 3187 3188 /* 3189 * Is the <prev> -> <next> link redundant? 3190 */ 3191 ret = check_redundant(prev, next); 3192 if (bfs_error(ret)) 3193 return 0; 3194 else if (ret == BFS_RMATCH) 3195 return 2; 3196 3197 if (!*trace) { 3198 *trace = save_trace(); 3199 if (!*trace) 3200 return 0; 3201 } 3202 3203 /* 3204 * Ok, all validations passed, add the new lock 3205 * to the previous lock's dependency list: 3206 */ 3207 ret = add_lock_to_list(hlock_class(next), hlock_class(prev), 3208 &hlock_class(prev)->locks_after, distance, 3209 calc_dep(prev, next), *trace); 3210 3211 if (!ret) 3212 return 0; 3213 3214 ret = add_lock_to_list(hlock_class(prev), hlock_class(next), 3215 &hlock_class(next)->locks_before, distance, 3216 calc_depb(prev, next), *trace); 3217 if (!ret) 3218 return 0; 3219 3220 return 2; 3221 } 3222 3223 /* 3224 * Add the dependency to all directly-previous locks that are 'relevant'. 3225 * The ones that are relevant are (in increasing distance from curr): 3226 * all consecutive trylock entries and the final non-trylock entry - or 3227 * the end of this context's lock-chain - whichever comes first. 3228 */ 3229 static int 3230 check_prevs_add(struct task_struct *curr, struct held_lock *next) 3231 { 3232 struct lock_trace *trace = NULL; 3233 int depth = curr->lockdep_depth; 3234 struct held_lock *hlock; 3235 3236 /* 3237 * Debugging checks. 
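	 * (Example of the "relevant" entries from the function comment
	 * above: holding L1; trylock(L2); trylock(L3); then lock(L4)
	 * records the dependencies L3 -> L4, L2 -> L4 and L1 -> L4, and
	 * stops at L1, the first non-trylock entry.)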
3238 	 *
3239 	 * Depth must not be zero for a non-head lock:
3240 	 */
3241 	if (!depth)
3242 		goto out_bug;
3243 	/*
3244 	 * At least two relevant locks must exist for this
3245 	 * to be a head:
3246 	 */
3247 	if (curr->held_locks[depth].irq_context !=
3248 			curr->held_locks[depth-1].irq_context)
3249 		goto out_bug;
3250 
3251 	for (;;) {
3252 		u16 distance = curr->lockdep_depth - depth + 1;
3253 		hlock = curr->held_locks + depth - 1;
3254 
3255 		if (hlock->check) {
3256 			int ret = check_prev_add(curr, hlock, next, distance, &trace);
3257 			if (!ret)
3258 				return 0;
3259 
3260 			/*
3261 			 * Stop after the first non-trylock entry,
3262 			 * as non-trylock entries have added their
3263 			 * own direct dependencies already, so this
3264 			 * lock is connected to them indirectly:
3265 			 */
3266 			if (!hlock->trylock)
3267 				break;
3268 		}
3269 
3270 		depth--;
3271 		/*
3272 		 * End of lock-stack?
3273 		 */
3274 		if (!depth)
3275 			break;
3276 		/*
3277 		 * Stop the search if we cross into another context:
3278 		 */
3279 		if (curr->held_locks[depth].irq_context !=
3280 				curr->held_locks[depth-1].irq_context)
3281 			break;
3282 	}
3283 	return 1;
3284 out_bug:
3285 	if (!debug_locks_off_graph_unlock())
3286 		return 0;
3287 
3288 	/*
3289 	 * Clearly we all shouldn't be here, but since we made it we
3290 	 * can reliably say we messed up our state. See the above two
3291 	 * gotos for reasons why we could possibly end up here.
3292 	 */
3293 	WARN_ON(1);
3294 
3295 	return 0;
3296 }
3297 
3298 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3299 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3300 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3301 unsigned long nr_zapped_lock_chains;
3302 unsigned int nr_free_chain_hlocks;	/* Free chain_hlocks in buckets */
3303 unsigned int nr_lost_chain_hlocks;	/* Lost chain_hlocks */
3304 unsigned int nr_large_chain_blocks;	/* size > MAX_CHAIN_BUCKETS */
3305 
3306 /*
3307  * The first 2 chain_hlocks entries in the chain block in the bucket
3308  * list contain the following metadata:
3309  *
3310  *   entry[0]:
3311  *     Bit    15 - always set to 1 (it is not a class index)
3312  *     Bits 0-14 - upper 15 bits of the next block index
3313  *   entry[1]    - lower 16 bits of next block index
3314  *
3315  * A next block index of all 1 bits means it is the end of the list.
3316  *
3317  * On the unsized bucket (bucket-0), the 3rd and 4th entries contain
3318  * the chain block size:
3319  *
3320  *   entry[2] - upper 16 bits of the chain block size
3321  *   entry[3] - lower 16 bits of the chain block size
3322  */
3323 #define MAX_CHAIN_BUCKETS	16
3324 #define CHAIN_BLK_FLAG		(1U << 15)
3325 #define CHAIN_BLK_LIST_END	0xFFFFU
3326 
3327 static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3328 
3329 static inline int size_to_bucket(int size)
3330 {
3331 	if (size > MAX_CHAIN_BUCKETS)
3332 		return 0;
3333 
3334 	return size - 1;
3335 }
3336 
3337 /*
3338  * Iterate all the chain blocks in a bucket.
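 *
 * (Worked example of the metadata layout described above, values
 * invented: a free bucket-0 block of size 20 at offset 100, whose next
 * block lives at offset 0x12345, is encoded as
 *
 *   chain_hlocks[100] = 0x8001;	// CHAIN_BLK_FLAG | (0x12345 >> 16)
 *   chain_hlocks[101] = 0x2345;	// 0x12345 & 0xffff
 *   chain_hlocks[102] = 0;		// 20 >> 16
 *   chain_hlocks[103] = 20;		// 20 & 0xffff
 * )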
3339  */
3340 #define for_each_chain_block(bucket, prev, curr)		\
3341 	for ((prev) = -1, (curr) = chain_block_buckets[bucket];	\
3342 	     (curr) >= 0;					\
3343 	     (prev) = (curr), (curr) = chain_block_next(curr))
3344 
3345 /*
3346  * next block or -1
3347  */
3348 static inline int chain_block_next(int offset)
3349 {
3350 	int next = chain_hlocks[offset];
3351 
3352 	WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3353 
3354 	if (next == CHAIN_BLK_LIST_END)
3355 		return -1;
3356 
3357 	next &= ~CHAIN_BLK_FLAG;
3358 	next <<= 16;
3359 	next |= chain_hlocks[offset + 1];
3360 
3361 	return next;
3362 }
3363 
3364 /*
3365  * bucket-0 only
3366  */
3367 static inline int chain_block_size(int offset)
3368 {
3369 	return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3370 }
3371 
3372 static inline void init_chain_block(int offset, int next, int bucket, int size)
3373 {
3374 	chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3375 	chain_hlocks[offset + 1] = (u16)next;
3376 
3377 	if (size && !bucket) {
3378 		chain_hlocks[offset + 2] = size >> 16;
3379 		chain_hlocks[offset + 3] = (u16)size;
3380 	}
3381 }
3382 
3383 static inline void add_chain_block(int offset, int size)
3384 {
3385 	int bucket = size_to_bucket(size);
3386 	int next = chain_block_buckets[bucket];
3387 	int prev, curr;
3388 
3389 	if (unlikely(size < 2)) {
3390 		/*
3391 		 * We can't store single entries on the freelist. Leak them.
3392 		 *
3393 		 * One possible way out would be to uniquely mark them, other
3394 		 * than with CHAIN_BLK_FLAG, such that we can recover them when
3395 		 * the block before it is re-added.
3396 		 */
3397 		if (size)
3398 			nr_lost_chain_hlocks++;
3399 		return;
3400 	}
3401 
3402 	nr_free_chain_hlocks += size;
3403 	if (!bucket) {
3404 		nr_large_chain_blocks++;
3405 
3406 		/*
3407 		 * Variable sized, sort large to small.
3408 		 */
3409 		for_each_chain_block(0, prev, curr) {
3410 			if (size >= chain_block_size(curr))
3411 				break;
3412 		}
3413 		init_chain_block(offset, curr, 0, size);
3414 		if (prev < 0)
3415 			chain_block_buckets[0] = offset;
3416 		else
3417 			init_chain_block(prev, offset, 0, 0);
3418 		return;
3419 	}
3420 	/*
3421 	 * Fixed size, add to head.
3422 	 */
3423 	init_chain_block(offset, next, bucket, size);
3424 	chain_block_buckets[bucket] = offset;
3425 }
3426 
3427 /*
3428  * Only the first block in the list can be deleted.
3429  *
3430  * For the variable size bucket[0], the first block (the largest one) is
3431  * returned, broken up and put back into the pool. So if a chain block of
3432  * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
3433  * queued up after the primordial chain block and never be used until the
3434  * hlock entries in the primordial chain block are almost used up. That
3435  * causes fragmentation and reduces allocation efficiency. That can be
3436  * monitored by looking at the "large chain blocks" number in lockdep_stats.
3437  */
3438 static inline void del_chain_block(int bucket, int size, int next)
3439 {
3440 	nr_free_chain_hlocks -= size;
3441 	chain_block_buckets[bucket] = next;
3442 
3443 	if (!bucket)
3444 		nr_large_chain_blocks--;
3445 }
3446 
3447 static void init_chain_block_buckets(void)
3448 {
3449 	int i;
3450 
3451 	for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3452 		chain_block_buckets[i] = -1;
3453 
3454 	add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3455 }
3456 
3457 /*
3458  * Return offset of a chain block of the right size or -1 if not found.
3459  *
3460  * Fairly simple worst-fit allocator with the addition of a number of size
3461  * specific free lists.
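 *
 * (For illustration: a request for 3 entries first tries the head of
 * chain_block_buckets[2]; failing that, it takes the largest bucket-0
 * block if that is big enough, splitting off the unused tail; as a last
 * resort it scans the fixed-size buckets from the largest downwards and
 * splits one of those, re-adding the remainder via add_chain_block().)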
3462 */ 3463 static int alloc_chain_hlocks(int req) 3464 { 3465 int bucket, curr, size; 3466 3467 /* 3468 * We rely on the MSB to act as an escape bit to denote freelist 3469 * pointers. Make sure this bit isn't set in 'normal' class_idx usage. 3470 */ 3471 BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG); 3472 3473 init_data_structures_once(); 3474 3475 if (nr_free_chain_hlocks < req) 3476 return -1; 3477 3478 /* 3479 * We require a minimum of 2 (u16) entries to encode a freelist 3480 * 'pointer'. 3481 */ 3482 req = max(req, 2); 3483 bucket = size_to_bucket(req); 3484 curr = chain_block_buckets[bucket]; 3485 3486 if (bucket) { 3487 if (curr >= 0) { 3488 del_chain_block(bucket, req, chain_block_next(curr)); 3489 return curr; 3490 } 3491 /* Try bucket 0 */ 3492 curr = chain_block_buckets[0]; 3493 } 3494 3495 /* 3496 * The variable sized freelist is sorted by size; the first entry is 3497 * the largest. Use it if it fits. 3498 */ 3499 if (curr >= 0) { 3500 size = chain_block_size(curr); 3501 if (likely(size >= req)) { 3502 del_chain_block(0, size, chain_block_next(curr)); 3503 add_chain_block(curr + req, size - req); 3504 return curr; 3505 } 3506 } 3507 3508 /* 3509 * Last resort, split a block in a larger sized bucket. 3510 */ 3511 for (size = MAX_CHAIN_BUCKETS; size > req; size--) { 3512 bucket = size_to_bucket(size); 3513 curr = chain_block_buckets[bucket]; 3514 if (curr < 0) 3515 continue; 3516 3517 del_chain_block(bucket, size, chain_block_next(curr)); 3518 add_chain_block(curr + req, size - req); 3519 return curr; 3520 } 3521 3522 return -1; 3523 } 3524 3525 static inline void free_chain_hlocks(int base, int size) 3526 { 3527 add_chain_block(base, max(size, 2)); 3528 } 3529 3530 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) 3531 { 3532 u16 chain_hlock = chain_hlocks[chain->base + i]; 3533 unsigned int class_idx = chain_hlock_class_idx(chain_hlock); 3534 3535 return lock_classes + class_idx; 3536 } 3537 3538 /* 3539 * Returns the index of the first held_lock of the current chain 3540 */ 3541 static inline int get_first_held_lock(struct task_struct *curr, 3542 struct held_lock *hlock) 3543 { 3544 int i; 3545 struct held_lock *hlock_curr; 3546 3547 for (i = curr->lockdep_depth - 1; i >= 0; i--) { 3548 hlock_curr = curr->held_locks + i; 3549 if (hlock_curr->irq_context != hlock->irq_context) 3550 break; 3551 3552 } 3553 3554 return ++i; 3555 } 3556 3557 #ifdef CONFIG_DEBUG_LOCKDEP 3558 /* 3559 * Returns the next chain_key iteration 3560 */ 3561 static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key) 3562 { 3563 u64 new_chain_key = iterate_chain_key(chain_key, hlock_id); 3564 3565 printk(" hlock_id:%d -> chain_key:%016Lx", 3566 (unsigned int)hlock_id, 3567 (unsigned long long)new_chain_key); 3568 return new_chain_key; 3569 } 3570 3571 static void 3572 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) 3573 { 3574 struct held_lock *hlock; 3575 u64 chain_key = INITIAL_CHAIN_KEY; 3576 int depth = curr->lockdep_depth; 3577 int i = get_first_held_lock(curr, hlock_next); 3578 3579 printk("depth: %u (irq_context %u)\n", depth - i + 1, 3580 hlock_next->irq_context); 3581 for (; i < depth; i++) { 3582 hlock = curr->held_locks + i; 3583 chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key); 3584 3585 print_lock(hlock); 3586 } 3587 3588 print_chain_key_iteration(hlock_id(hlock_next), chain_key); 3589 print_lock(hlock_next); 3590 } 3591 3592 static void print_chain_keys_chain(struct lock_chain *chain) 3593 { 3594 int 
i;
3595 	u64 chain_key = INITIAL_CHAIN_KEY;
3596 	u16 hlock_id;
3597 
3598 	printk("depth: %u\n", chain->depth);
3599 	for (i = 0; i < chain->depth; i++) {
3600 		hlock_id = chain_hlocks[chain->base + i];
3601 		chain_key = print_chain_key_iteration(hlock_id, chain_key);
3602 
3603 		print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id));
3604 		printk("\n");
3605 	}
3606 }
3607 
3608 static void print_collision(struct task_struct *curr,
3609 			    struct held_lock *hlock_next,
3610 			    struct lock_chain *chain)
3611 {
3612 	pr_warn("\n");
3613 	pr_warn("============================\n");
3614 	pr_warn("WARNING: chain_key collision\n");
3615 	print_kernel_ident();
3616 	pr_warn("----------------------------\n");
3617 	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3618 	pr_warn("Hash chain already cached but the contents don't match!\n");
3619 
3620 	pr_warn("Held locks:");
3621 	print_chain_keys_held_locks(curr, hlock_next);
3622 
3623 	pr_warn("Locks in cached chain:");
3624 	print_chain_keys_chain(chain);
3625 
3626 	pr_warn("\nstack backtrace:\n");
3627 	dump_stack();
3628 }
3629 #endif
3630 
3631 /*
3632  * Checks whether the chain and the current held locks are consistent
3633  * in depth and also in content. If they are not, it most likely means
3634  * that there was a collision during the calculation of the chain_key.
3635  * Returns: 0 not passed, 1 passed
3636  */
3637 static int check_no_collision(struct task_struct *curr,
3638 			      struct held_lock *hlock,
3639 			      struct lock_chain *chain)
3640 {
3641 #ifdef CONFIG_DEBUG_LOCKDEP
3642 	int i, j, id;
3643 
3644 	i = get_first_held_lock(curr, hlock);
3645 
3646 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3647 		print_collision(curr, hlock, chain);
3648 		return 0;
3649 	}
3650 
3651 	for (j = 0; j < chain->depth - 1; j++, i++) {
3652 		id = hlock_id(&curr->held_locks[i]);
3653 
3654 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3655 			print_collision(curr, hlock, chain);
3656 			return 0;
3657 		}
3658 	}
3659 #endif
3660 	return 1;
3661 }
3662 
3663 /*
3664  * Given an index that is >= -1, return the index of the next lock chain.
3665  * Return -2 if there is no next lock chain.
3666  */
3667 long lockdep_next_lockchain(long i)
3668 {
3669 	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3670 	return i < ARRAY_SIZE(lock_chains) ? i : -2;
3671 }
3672 
3673 unsigned long lock_chain_count(void)
3674 {
3675 	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3676 }
3677 
3678 /* Must be called with the graph lock held. */
3679 static struct lock_chain *alloc_lock_chain(void)
3680 {
3681 	int idx = find_first_zero_bit(lock_chains_in_use,
3682 				      ARRAY_SIZE(lock_chains));
3683 
3684 	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3685 		return NULL;
3686 	__set_bit(idx, lock_chains_in_use);
3687 	return lock_chains + idx;
3688 }
3689 
3690 /*
3691  * Adds a dependency chain into the chain hashtable. Must be called with
3692  * the graph lock held.
3693  *
3694  * Returns 0 on failure, with the graph lock released.
3695  * Returns 1 on success, with the graph lock still held.
3696  */
3697 static inline int add_chain_cache(struct task_struct *curr,
3698 				  struct held_lock *hlock,
3699 				  u64 chain_key)
3700 {
3701 	struct hlist_head *hash_head = chainhashentry(chain_key);
3702 	struct lock_chain *chain;
3703 	int i, j;
3704 
3705 	/*
3706 	 * The caller must hold the graph lock and ensure we've got IRQs
3707 	 * disabled, to make this an IRQ-safe lock; for recursion reasons,
3708 	 * lockdep won't complain about its own locking errors.
3709 */ 3710 if (lockdep_assert_locked()) 3711 return 0; 3712 3713 chain = alloc_lock_chain(); 3714 if (!chain) { 3715 if (!debug_locks_off_graph_unlock()) 3716 return 0; 3717 3718 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 3719 dump_stack(); 3720 return 0; 3721 } 3722 chain->chain_key = chain_key; 3723 chain->irq_context = hlock->irq_context; 3724 i = get_first_held_lock(curr, hlock); 3725 chain->depth = curr->lockdep_depth + 1 - i; 3726 3727 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 3728 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 3729 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 3730 3731 j = alloc_chain_hlocks(chain->depth); 3732 if (j < 0) { 3733 if (!debug_locks_off_graph_unlock()) 3734 return 0; 3735 3736 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 3737 dump_stack(); 3738 return 0; 3739 } 3740 3741 chain->base = j; 3742 for (j = 0; j < chain->depth - 1; j++, i++) { 3743 int lock_id = hlock_id(curr->held_locks + i); 3744 3745 chain_hlocks[chain->base + j] = lock_id; 3746 } 3747 chain_hlocks[chain->base + j] = hlock_id(hlock); 3748 hlist_add_head_rcu(&chain->entry, hash_head); 3749 debug_atomic_inc(chain_lookup_misses); 3750 inc_chains(chain->irq_context); 3751 3752 return 1; 3753 } 3754 3755 /* 3756 * Look up a dependency chain. Must be called with either the graph lock or 3757 * the RCU read lock held. 3758 */ 3759 static inline struct lock_chain *lookup_chain_cache(u64 chain_key) 3760 { 3761 struct hlist_head *hash_head = chainhashentry(chain_key); 3762 struct lock_chain *chain; 3763 3764 hlist_for_each_entry_rcu(chain, hash_head, entry) { 3765 if (READ_ONCE(chain->chain_key) == chain_key) { 3766 debug_atomic_inc(chain_lookup_hits); 3767 return chain; 3768 } 3769 } 3770 return NULL; 3771 } 3772 3773 /* 3774 * If the key is not present yet in dependency chain cache then 3775 * add it and return 1 - in this case the new dependency chain is 3776 * validated. If the key is already hashed, return 0. 3777 * (On return with 1 graph_lock is held.) 3778 */ 3779 static inline int lookup_chain_cache_add(struct task_struct *curr, 3780 struct held_lock *hlock, 3781 u64 chain_key) 3782 { 3783 struct lock_class *class = hlock_class(hlock); 3784 struct lock_chain *chain = lookup_chain_cache(chain_key); 3785 3786 if (chain) { 3787 cache_hit: 3788 if (!check_no_collision(curr, hlock, chain)) 3789 return 0; 3790 3791 if (very_verbose(class)) { 3792 printk("\nhash chain already cached, key: " 3793 "%016Lx tail class: [%px] %s\n", 3794 (unsigned long long)chain_key, 3795 class->key, class->name); 3796 } 3797 3798 return 0; 3799 } 3800 3801 if (very_verbose(class)) { 3802 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n", 3803 (unsigned long long)chain_key, class->key, class->name); 3804 } 3805 3806 if (!graph_lock()) 3807 return 0; 3808 3809 /* 3810 * We have to walk the chain again locked - to avoid duplicates: 3811 */ 3812 chain = lookup_chain_cache(chain_key); 3813 if (chain) { 3814 graph_unlock(); 3815 goto cache_hit; 3816 } 3817 3818 if (!add_chain_cache(curr, hlock, chain_key)) 3819 return 0; 3820 3821 return 1; 3822 } 3823 3824 static int validate_chain(struct task_struct *curr, 3825 struct held_lock *hlock, 3826 int chain_head, u64 chain_key) 3827 { 3828 /* 3829 * Trylock needs to maintain the stack of held locks, but it 3830 * does not add new dependencies, because trylock can be done 3831 * in any order. 
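	 *
	 * (E.g. one task doing spin_trylock(A); spin_trylock(B) while
	 * another does spin_trylock(B); spin_trylock(A) cannot deadlock:
	 * a trylock never waits, it just fails, so no new A <-> B
	 * ordering needs to be recorded.)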
3832 * 3833 * We look up the chain_key and do the O(N^2) check and update of 3834 * the dependencies only if this is a new dependency chain. 3835 * (If lookup_chain_cache_add() return with 1 it acquires 3836 * graph_lock for us) 3837 */ 3838 if (!hlock->trylock && hlock->check && 3839 lookup_chain_cache_add(curr, hlock, chain_key)) { 3840 /* 3841 * Check whether last held lock: 3842 * 3843 * - is irq-safe, if this lock is irq-unsafe 3844 * - is softirq-safe, if this lock is hardirq-unsafe 3845 * 3846 * And check whether the new lock's dependency graph 3847 * could lead back to the previous lock: 3848 * 3849 * - within the current held-lock stack 3850 * - across our accumulated lock dependency records 3851 * 3852 * any of these scenarios could lead to a deadlock. 3853 */ 3854 /* 3855 * The simple case: does the current hold the same lock 3856 * already? 3857 */ 3858 int ret = check_deadlock(curr, hlock); 3859 3860 if (!ret) 3861 return 0; 3862 /* 3863 * Add dependency only if this lock is not the head 3864 * of the chain, and if the new lock introduces no more 3865 * lock dependency (because we already hold a lock with the 3866 * same lock class) nor deadlock (because the nest_lock 3867 * serializes nesting locks), see the comments for 3868 * check_deadlock(). 3869 */ 3870 if (!chain_head && ret != 2) { 3871 if (!check_prevs_add(curr, hlock)) 3872 return 0; 3873 } 3874 3875 graph_unlock(); 3876 } else { 3877 /* after lookup_chain_cache_add(): */ 3878 if (unlikely(!debug_locks)) 3879 return 0; 3880 } 3881 3882 return 1; 3883 } 3884 #else 3885 static inline int validate_chain(struct task_struct *curr, 3886 struct held_lock *hlock, 3887 int chain_head, u64 chain_key) 3888 { 3889 return 1; 3890 } 3891 3892 static void init_chain_block_buckets(void) { } 3893 #endif /* CONFIG_PROVE_LOCKING */ 3894 3895 /* 3896 * We are building curr_chain_key incrementally, so double-check 3897 * it from scratch, to make sure that it's done correctly: 3898 */ 3899 static void check_chain_key(struct task_struct *curr) 3900 { 3901 #ifdef CONFIG_DEBUG_LOCKDEP 3902 struct held_lock *hlock, *prev_hlock = NULL; 3903 unsigned int i; 3904 u64 chain_key = INITIAL_CHAIN_KEY; 3905 3906 for (i = 0; i < curr->lockdep_depth; i++) { 3907 hlock = curr->held_locks + i; 3908 if (chain_key != hlock->prev_chain_key) { 3909 debug_locks_off(); 3910 /* 3911 * We got mighty confused, our chain keys don't match 3912 * with what we expect, someone trample on our task state? 3913 */ 3914 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 3915 curr->lockdep_depth, i, 3916 (unsigned long long)chain_key, 3917 (unsigned long long)hlock->prev_chain_key); 3918 return; 3919 } 3920 3921 /* 3922 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is 3923 * it registered lock class index? 3924 */ 3925 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use))) 3926 return; 3927 3928 if (prev_hlock && (prev_hlock->irq_context != 3929 hlock->irq_context)) 3930 chain_key = INITIAL_CHAIN_KEY; 3931 chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); 3932 prev_hlock = hlock; 3933 } 3934 if (chain_key != curr->curr_chain_key) { 3935 debug_locks_off(); 3936 /* 3937 * More smoking hash instead of calculating it, damn see these 3938 * numbers float.. I bet that a pink elephant stepped on my memory. 
3939 */ 3940 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 3941 curr->lockdep_depth, i, 3942 (unsigned long long)chain_key, 3943 (unsigned long long)curr->curr_chain_key); 3944 } 3945 #endif 3946 } 3947 3948 #ifdef CONFIG_PROVE_LOCKING 3949 static int mark_lock(struct task_struct *curr, struct held_lock *this, 3950 enum lock_usage_bit new_bit); 3951 3952 static void print_usage_bug_scenario(struct held_lock *lock) 3953 { 3954 struct lock_class *class = hlock_class(lock); 3955 3956 printk(" Possible unsafe locking scenario:\n\n"); 3957 printk(" CPU0\n"); 3958 printk(" ----\n"); 3959 printk(" lock("); 3960 __print_lock_name(lock, class); 3961 printk(KERN_CONT ");\n"); 3962 printk(" <Interrupt>\n"); 3963 printk(" lock("); 3964 __print_lock_name(lock, class); 3965 printk(KERN_CONT ");\n"); 3966 printk("\n *** DEADLOCK ***\n\n"); 3967 } 3968 3969 static void 3970 print_usage_bug(struct task_struct *curr, struct held_lock *this, 3971 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 3972 { 3973 if (!debug_locks_off() || debug_locks_silent) 3974 return; 3975 3976 pr_warn("\n"); 3977 pr_warn("================================\n"); 3978 pr_warn("WARNING: inconsistent lock state\n"); 3979 print_kernel_ident(); 3980 pr_warn("--------------------------------\n"); 3981 3982 pr_warn("inconsistent {%s} -> {%s} usage.\n", 3983 usage_str[prev_bit], usage_str[new_bit]); 3984 3985 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 3986 curr->comm, task_pid_nr(curr), 3987 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, 3988 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 3989 lockdep_hardirqs_enabled(), 3990 lockdep_softirqs_enabled(curr)); 3991 print_lock(this); 3992 3993 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); 3994 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1); 3995 3996 print_irqtrace_events(curr); 3997 pr_warn("\nother info that might help us debug this:\n"); 3998 print_usage_bug_scenario(this); 3999 4000 lockdep_print_held_locks(curr); 4001 4002 pr_warn("\nstack backtrace:\n"); 4003 dump_stack(); 4004 } 4005 4006 /* 4007 * Print out an error if an invalid bit is set: 4008 */ 4009 static inline int 4010 valid_state(struct task_struct *curr, struct held_lock *this, 4011 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 4012 { 4013 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) { 4014 graph_unlock(); 4015 print_usage_bug(curr, this, bad_bit, new_bit); 4016 return 0; 4017 } 4018 return 1; 4019 } 4020 4021 4022 /* 4023 * print irq inversion bug: 4024 */ 4025 static void 4026 print_irq_inversion_bug(struct task_struct *curr, 4027 struct lock_list *root, struct lock_list *other, 4028 struct held_lock *this, int forwards, 4029 const char *irqclass) 4030 { 4031 struct lock_list *entry = other; 4032 struct lock_list *middle = NULL; 4033 int depth; 4034 4035 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 4036 return; 4037 4038 pr_warn("\n"); 4039 pr_warn("========================================================\n"); 4040 pr_warn("WARNING: possible irq lock inversion dependency detected\n"); 4041 print_kernel_ident(); 4042 pr_warn("--------------------------------------------------------\n"); 4043 pr_warn("%s/%d just changed the state of lock:\n", 4044 curr->comm, task_pid_nr(curr)); 4045 print_lock(this); 4046 if (forwards) 4047 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 4048 else 4049 pr_warn("but this lock was taken by another, %s-safe lock in the 
past:\n", irqclass); 4050 print_lock_name(NULL, other->class); 4051 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 4052 4053 pr_warn("\nother info that might help us debug this:\n"); 4054 4055 /* Find a middle lock (if one exists) */ 4056 depth = get_lock_depth(other); 4057 do { 4058 if (depth == 0 && (entry != root)) { 4059 pr_warn("lockdep:%s bad path found in chain graph\n", __func__); 4060 break; 4061 } 4062 middle = entry; 4063 entry = get_lock_parent(entry); 4064 depth--; 4065 } while (entry && entry != root && (depth >= 0)); 4066 if (forwards) 4067 print_irq_lock_scenario(root, other, 4068 middle ? middle->class : root->class, other->class); 4069 else 4070 print_irq_lock_scenario(other, root, 4071 middle ? middle->class : other->class, root->class); 4072 4073 lockdep_print_held_locks(curr); 4074 4075 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 4076 root->trace = save_trace(); 4077 if (!root->trace) 4078 return; 4079 print_shortest_lock_dependencies(other, root); 4080 4081 pr_warn("\nstack backtrace:\n"); 4082 dump_stack(); 4083 } 4084 4085 /* 4086 * Prove that in the forwards-direction subgraph starting at <this> 4087 * there is no lock matching <mask>: 4088 */ 4089 static int 4090 check_usage_forwards(struct task_struct *curr, struct held_lock *this, 4091 enum lock_usage_bit bit) 4092 { 4093 enum bfs_result ret; 4094 struct lock_list root; 4095 struct lock_list *target_entry; 4096 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; 4097 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); 4098 4099 bfs_init_root(&root, this); 4100 ret = find_usage_forwards(&root, usage_mask, &target_entry); 4101 if (bfs_error(ret)) { 4102 print_bfs_bug(ret); 4103 return 0; 4104 } 4105 if (ret == BFS_RNOMATCH) 4106 return 1; 4107 4108 /* Check whether write or read usage is the match */ 4109 if (target_entry->class->usage_mask & lock_flag(bit)) { 4110 print_irq_inversion_bug(curr, &root, target_entry, 4111 this, 1, state_name(bit)); 4112 } else { 4113 print_irq_inversion_bug(curr, &root, target_entry, 4114 this, 1, state_name(read_bit)); 4115 } 4116 4117 return 0; 4118 } 4119 4120 /* 4121 * Prove that in the backwards-direction subgraph starting at <this> 4122 * there is no lock matching <mask>: 4123 */ 4124 static int 4125 check_usage_backwards(struct task_struct *curr, struct held_lock *this, 4126 enum lock_usage_bit bit) 4127 { 4128 enum bfs_result ret; 4129 struct lock_list root; 4130 struct lock_list *target_entry; 4131 enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK; 4132 unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit); 4133 4134 bfs_init_rootb(&root, this); 4135 ret = find_usage_backwards(&root, usage_mask, &target_entry); 4136 if (bfs_error(ret)) { 4137 print_bfs_bug(ret); 4138 return 0; 4139 } 4140 if (ret == BFS_RNOMATCH) 4141 return 1; 4142 4143 /* Check whether write or read usage is the match */ 4144 if (target_entry->class->usage_mask & lock_flag(bit)) { 4145 print_irq_inversion_bug(curr, &root, target_entry, 4146 this, 0, state_name(bit)); 4147 } else { 4148 print_irq_inversion_bug(curr, &root, target_entry, 4149 this, 0, state_name(read_bit)); 4150 } 4151 4152 return 0; 4153 } 4154 4155 void print_irqtrace_events(struct task_struct *curr) 4156 { 4157 const struct irqtrace_events *trace = &curr->irqtrace; 4158 4159 printk("irq event stamp: %u\n", trace->irq_events); 4160 printk("hardirqs last enabled at (%u): [<%px>] %pS\n", 4161 trace->hardirq_enable_event, (void 
*)trace->hardirq_enable_ip, 4162 (void *)trace->hardirq_enable_ip); 4163 printk("hardirqs last disabled at (%u): [<%px>] %pS\n", 4164 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip, 4165 (void *)trace->hardirq_disable_ip); 4166 printk("softirqs last enabled at (%u): [<%px>] %pS\n", 4167 trace->softirq_enable_event, (void *)trace->softirq_enable_ip, 4168 (void *)trace->softirq_enable_ip); 4169 printk("softirqs last disabled at (%u): [<%px>] %pS\n", 4170 trace->softirq_disable_event, (void *)trace->softirq_disable_ip, 4171 (void *)trace->softirq_disable_ip); 4172 } 4173 4174 static int HARDIRQ_verbose(struct lock_class *class) 4175 { 4176 #if HARDIRQ_VERBOSE 4177 return class_filter(class); 4178 #endif 4179 return 0; 4180 } 4181 4182 static int SOFTIRQ_verbose(struct lock_class *class) 4183 { 4184 #if SOFTIRQ_VERBOSE 4185 return class_filter(class); 4186 #endif 4187 return 0; 4188 } 4189 4190 static int (*state_verbose_f[])(struct lock_class *class) = { 4191 #define LOCKDEP_STATE(__STATE) \ 4192 __STATE##_verbose, 4193 #include "lockdep_states.h" 4194 #undef LOCKDEP_STATE 4195 }; 4196 4197 static inline int state_verbose(enum lock_usage_bit bit, 4198 struct lock_class *class) 4199 { 4200 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class); 4201 } 4202 4203 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 4204 enum lock_usage_bit bit, const char *name); 4205 4206 static int 4207 mark_lock_irq(struct task_struct *curr, struct held_lock *this, 4208 enum lock_usage_bit new_bit) 4209 { 4210 int excl_bit = exclusive_bit(new_bit); 4211 int read = new_bit & LOCK_USAGE_READ_MASK; 4212 int dir = new_bit & LOCK_USAGE_DIR_MASK; 4213 4214 /* 4215 * Validate that this particular lock does not have conflicting 4216 * usage states. 4217 */ 4218 if (!valid_state(curr, this, new_bit, excl_bit)) 4219 return 0; 4220 4221 /* 4222 * Check for read in write conflicts 4223 */ 4224 if (!read && !valid_state(curr, this, new_bit, 4225 excl_bit + LOCK_USAGE_READ_MASK)) 4226 return 0; 4227 4228 4229 /* 4230 * Validate that the lock dependencies don't have conflicting usage 4231 * states. 4232 */ 4233 if (dir) { 4234 /* 4235 * mark ENABLED has to look backwards -- to ensure no dependee 4236 * has USED_IN state, which, again, would allow recursion deadlocks. 4237 */ 4238 if (!check_usage_backwards(curr, this, excl_bit)) 4239 return 0; 4240 } else { 4241 /* 4242 * mark USED_IN has to look forwards -- to ensure no dependency 4243 * has ENABLED state, which would allow recursion deadlocks. 
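 *
 * (Illustrative deadlock this closes off, assuming a dependency
 * A -> B is already on record and B is ever taken with hardirqs
 * enabled:
 *
 *	CPU0				CPU1
 *					lock(A);
 *	lock(B);			  lock(B);	// waits for CPU0
 *	<hardirq>
 *	  lock(A);	// waits for CPU1 -> deadlock
 * )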
4244 */
4245 if (!check_usage_forwards(curr, this, excl_bit))
4246 return 0;
4247 }
4248
4249 if (state_verbose(new_bit, hlock_class(this)))
4250 return 2;
4251
4252 return 1;
4253 }
4254
4255 /*
4256 * Mark all held locks with a usage bit:
4257 */
4258 static int
4259 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4260 {
4261 struct held_lock *hlock;
4262 int i;
4263
4264 for (i = 0; i < curr->lockdep_depth; i++) {
4265 enum lock_usage_bit hlock_bit = base_bit;
4266 hlock = curr->held_locks + i;
4267
4268 if (hlock->read)
4269 hlock_bit += LOCK_USAGE_READ_MASK;
4270
4271 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4272
4273 if (!hlock->check)
4274 continue;
4275
4276 if (!mark_lock(curr, hlock, hlock_bit))
4277 return 0;
4278 }
4279
4280 return 1;
4281 }
4282
4283 /*
4284 * Hardirqs will be enabled:
4285 */
4286 static void __trace_hardirqs_on_caller(void)
4287 {
4288 struct task_struct *curr = current;
4289
4290 /*
4291 * We are going to turn hardirqs on, so set the
4292 * usage bit for all held locks:
4293 */
4294 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4295 return;
4296 /*
4297 * If we have softirqs enabled, then set the usage
4298 * bit for all held locks. (disabled hardirqs prevented
4299 * this bit from being set before)
4300 */
4301 if (curr->softirqs_enabled)
4302 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4303 }
4304
4305 /**
4306 * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4307 *
4308 * Invoked before a possible transition to RCU idle from exit to user or
4309 * guest mode. This ensures that all RCU operations are done before RCU
4310 * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
4311 * invoked to set the final state.
4312 */
4313 void lockdep_hardirqs_on_prepare(void)
4314 {
4315 if (unlikely(!debug_locks))
4316 return;
4317
4318 /*
4319 * NMIs do not (and cannot) track lock dependencies, nothing to do.
4320 */
4321 if (unlikely(in_nmi()))
4322 return;
4323
4324 if (unlikely(this_cpu_read(lockdep_recursion)))
4325 return;
4326
4327 if (unlikely(lockdep_hardirqs_enabled())) {
4328 /*
4329 * Neither irq nor preemption is disabled here
4330 * so this is racy by nature but losing one hit
4331 * in a stat is not a big deal.
4332 */
4333 __debug_atomic_inc(redundant_hardirqs_on);
4334 return;
4335 }
4336
4337 /*
4338 * We're enabling irqs and according to our state above irqs weren't
4339 * already enabled, yet we find the hardware thinks they are in fact
4340 * enabled.. someone messed up their IRQ state tracing.
4341 */
4342 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4343 return;
4344
4345 /*
4346 * See the fine text that goes along with this variable definition.
4347 */
4348 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4349 return;
4350
4351 /*
4352 * Can't allow enabling interrupts while in an interrupt handler,
4353 * that's general bad form and such. Recursion, limited stack etc..
4354 */
4355 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4356 return;
4357
4358 current->hardirq_chain_key = current->curr_chain_key;
4359
4360 lockdep_recursion_inc();
4361 __trace_hardirqs_on_caller();
4362 lockdep_recursion_finish();
4363 }
4364 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4365
4366 void noinstr lockdep_hardirqs_on(unsigned long ip)
4367 {
4368 struct irqtrace_events *trace = &current->irqtrace;
4369
4370 if (unlikely(!debug_locks))
4371 return;
4372
4373 /*
4374 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
4375 * tracking state and hardware state are out of sync.
4376 * 4377 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from, 4378 * and not rely on hardware state like normal interrupts. 4379 */ 4380 if (unlikely(in_nmi())) { 4381 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) 4382 return; 4383 4384 /* 4385 * Skip: 4386 * - recursion check, because NMI can hit lockdep; 4387 * - hardware state check, because above; 4388 * - chain_key check, see lockdep_hardirqs_on_prepare(). 4389 */ 4390 goto skip_checks; 4391 } 4392 4393 if (unlikely(this_cpu_read(lockdep_recursion))) 4394 return; 4395 4396 if (lockdep_hardirqs_enabled()) { 4397 /* 4398 * Neither irq nor preemption are disabled here 4399 * so this is racy by nature but losing one hit 4400 * in a stat is not a big deal. 4401 */ 4402 __debug_atomic_inc(redundant_hardirqs_on); 4403 return; 4404 } 4405 4406 /* 4407 * We're enabling irqs and according to our state above irqs weren't 4408 * already enabled, yet we find the hardware thinks they are in fact 4409 * enabled.. someone messed up their IRQ state tracing. 4410 */ 4411 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 4412 return; 4413 4414 /* 4415 * Ensure the lock stack remained unchanged between 4416 * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on(). 4417 */ 4418 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key != 4419 current->curr_chain_key); 4420 4421 skip_checks: 4422 /* we'll do an OFF -> ON transition: */ 4423 __this_cpu_write(hardirqs_enabled, 1); 4424 trace->hardirq_enable_ip = ip; 4425 trace->hardirq_enable_event = ++trace->irq_events; 4426 debug_atomic_inc(hardirqs_on_events); 4427 } 4428 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); 4429 4430 /* 4431 * Hardirqs were disabled: 4432 */ 4433 void noinstr lockdep_hardirqs_off(unsigned long ip) 4434 { 4435 if (unlikely(!debug_locks)) 4436 return; 4437 4438 /* 4439 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep; 4440 * they will restore the software state. This ensures the software 4441 * state is consistent inside NMIs as well. 4442 */ 4443 if (in_nmi()) { 4444 if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) 4445 return; 4446 } else if (__this_cpu_read(lockdep_recursion)) 4447 return; 4448 4449 /* 4450 * So we're supposed to get called after you mask local IRQs, but for 4451 * some reason the hardware doesn't quite think you did a proper job. 4452 */ 4453 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 4454 return; 4455 4456 if (lockdep_hardirqs_enabled()) { 4457 struct irqtrace_events *trace = ¤t->irqtrace; 4458 4459 /* 4460 * We have done an ON -> OFF transition: 4461 */ 4462 __this_cpu_write(hardirqs_enabled, 0); 4463 trace->hardirq_disable_ip = ip; 4464 trace->hardirq_disable_event = ++trace->irq_events; 4465 debug_atomic_inc(hardirqs_off_events); 4466 } else { 4467 debug_atomic_inc(redundant_hardirqs_off); 4468 } 4469 } 4470 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off); 4471 4472 /* 4473 * Softirqs will be enabled: 4474 */ 4475 void lockdep_softirqs_on(unsigned long ip) 4476 { 4477 struct irqtrace_events *trace = ¤t->irqtrace; 4478 4479 if (unlikely(!lockdep_enabled())) 4480 return; 4481 4482 /* 4483 * We fancy IRQs being disabled here, see softirq.c, avoids 4484 * funny state and nesting things. 
4485 */
4486 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4487 return;
4488
4489 if (current->softirqs_enabled) {
4490 debug_atomic_inc(redundant_softirqs_on);
4491 return;
4492 }
4493
4494 lockdep_recursion_inc();
4495 /*
4496 * We'll do an OFF -> ON transition:
4497 */
4498 current->softirqs_enabled = 1;
4499 trace->softirq_enable_ip = ip;
4500 trace->softirq_enable_event = ++trace->irq_events;
4501 debug_atomic_inc(softirqs_on_events);
4502 /*
4503 * We are going to turn softirqs on, so set the
4504 * usage bit for all held locks, if hardirqs are
4505 * enabled too:
4506 */
4507 if (lockdep_hardirqs_enabled())
4508 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4509 lockdep_recursion_finish();
4510 }
4511
4512 /*
4513 * Softirqs were disabled:
4514 */
4515 void lockdep_softirqs_off(unsigned long ip)
4516 {
4517 if (unlikely(!lockdep_enabled()))
4518 return;
4519
4520 /*
4521 * We fancy IRQs being disabled here, see softirq.c
4522 */
4523 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4524 return;
4525
4526 if (current->softirqs_enabled) {
4527 struct irqtrace_events *trace = &current->irqtrace;
4528
4529 /*
4530 * We have done an ON -> OFF transition:
4531 */
4532 current->softirqs_enabled = 0;
4533 trace->softirq_disable_ip = ip;
4534 trace->softirq_disable_event = ++trace->irq_events;
4535 debug_atomic_inc(softirqs_off_events);
4536 /*
4537 * Whoops, we wanted softirqs off, so why aren't they?
4538 */
4539 DEBUG_LOCKS_WARN_ON(!softirq_count());
4540 } else
4541 debug_atomic_inc(redundant_softirqs_off);
4542 }
4543
4544 static int
4545 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4546 {
4547 if (!check)
4548 goto lock_used;
4549
4550 /*
4551 * If non-trylock use in a hardirq or softirq context, then
4552 * mark the lock as used in these contexts:
4553 */
4554 if (!hlock->trylock) {
4555 if (hlock->read) {
4556 if (lockdep_hardirq_context())
4557 if (!mark_lock(curr, hlock,
4558 LOCK_USED_IN_HARDIRQ_READ))
4559 return 0;
4560 if (curr->softirq_context)
4561 if (!mark_lock(curr, hlock,
4562 LOCK_USED_IN_SOFTIRQ_READ))
4563 return 0;
4564 } else {
4565 if (lockdep_hardirq_context())
4566 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4567 return 0;
4568 if (curr->softirq_context)
4569 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4570 return 0;
4571 }
4572 }
4573
4574 /*
4575 * For lock_sync(), don't mark the ENABLED usage, since lock_sync()
4576 * creates no critical section and no extra dependency can be introduced
4577 * by interrupts
4578 */
4579 if (!hlock->hardirqs_off && !hlock->sync) {
4580 if (hlock->read) {
4581 if (!mark_lock(curr, hlock,
4582 LOCK_ENABLED_HARDIRQ_READ))
4583 return 0;
4584 if (curr->softirqs_enabled)
4585 if (!mark_lock(curr, hlock,
4586 LOCK_ENABLED_SOFTIRQ_READ))
4587 return 0;
4588 } else {
4589 if (!mark_lock(curr, hlock,
4590 LOCK_ENABLED_HARDIRQ))
4591 return 0;
4592 if (curr->softirqs_enabled)
4593 if (!mark_lock(curr, hlock,
4594 LOCK_ENABLED_SOFTIRQ))
4595 return 0;
4596 }
4597 }
4598
4599 lock_used:
4600 /* mark it as used: */
4601 if (!mark_lock(curr, hlock, LOCK_USED))
4602 return 0;
4603
4604 return 1;
4605 }
4606
4607 static inline unsigned int task_irq_context(struct task_struct *task)
4608 {
4609 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4610 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4611 }
4612
4613 static int separate_irq_context(struct task_struct *curr,
4614 struct held_lock *hlock)
4615 {
4616 unsigned int depth = curr->lockdep_depth;
4617
4618 /*
4619 * Keep track of points
where we cross into an interrupt context: 4620 */ 4621 if (depth) { 4622 struct held_lock *prev_hlock; 4623 4624 prev_hlock = curr->held_locks + depth-1; 4625 /* 4626 * If we cross into another context, reset the 4627 * hash key (this also prevents the checking and the 4628 * adding of the dependency to 'prev'): 4629 */ 4630 if (prev_hlock->irq_context != hlock->irq_context) 4631 return 1; 4632 } 4633 return 0; 4634 } 4635 4636 /* 4637 * Mark a lock with a usage bit, and validate the state transition: 4638 */ 4639 static int mark_lock(struct task_struct *curr, struct held_lock *this, 4640 enum lock_usage_bit new_bit) 4641 { 4642 unsigned int new_mask, ret = 1; 4643 4644 if (new_bit >= LOCK_USAGE_STATES) { 4645 DEBUG_LOCKS_WARN_ON(1); 4646 return 0; 4647 } 4648 4649 if (new_bit == LOCK_USED && this->read) 4650 new_bit = LOCK_USED_READ; 4651 4652 new_mask = 1 << new_bit; 4653 4654 /* 4655 * If already set then do not dirty the cacheline, 4656 * nor do any checks: 4657 */ 4658 if (likely(hlock_class(this)->usage_mask & new_mask)) 4659 return 1; 4660 4661 if (!graph_lock()) 4662 return 0; 4663 /* 4664 * Make sure we didn't race: 4665 */ 4666 if (unlikely(hlock_class(this)->usage_mask & new_mask)) 4667 goto unlock; 4668 4669 if (!hlock_class(this)->usage_mask) 4670 debug_atomic_dec(nr_unused_locks); 4671 4672 hlock_class(this)->usage_mask |= new_mask; 4673 4674 if (new_bit < LOCK_TRACE_STATES) { 4675 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace())) 4676 return 0; 4677 } 4678 4679 if (new_bit < LOCK_USED) { 4680 ret = mark_lock_irq(curr, this, new_bit); 4681 if (!ret) 4682 return 0; 4683 } 4684 4685 unlock: 4686 graph_unlock(); 4687 4688 /* 4689 * We must printk outside of the graph_lock: 4690 */ 4691 if (ret == 2) { 4692 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 4693 print_lock(this); 4694 print_irqtrace_events(curr); 4695 dump_stack(); 4696 } 4697 4698 return ret; 4699 } 4700 4701 static inline short task_wait_context(struct task_struct *curr) 4702 { 4703 /* 4704 * Set appropriate wait type for the context; for IRQs we have to take 4705 * into account force_irqthread as that is implied by PREEMPT_RT. 4706 */ 4707 if (lockdep_hardirq_context()) { 4708 /* 4709 * Check if force_irqthreads will run us threaded. 4710 */ 4711 if (curr->hardirq_threaded || curr->irq_config) 4712 return LD_WAIT_CONFIG; 4713 4714 return LD_WAIT_SPIN; 4715 } else if (curr->softirq_context) { 4716 /* 4717 * Softirqs are always threaded. 4718 */ 4719 return LD_WAIT_CONFIG; 4720 } 4721 4722 return LD_WAIT_MAX; 4723 } 4724 4725 static int 4726 print_lock_invalid_wait_context(struct task_struct *curr, 4727 struct held_lock *hlock) 4728 { 4729 short curr_inner; 4730 4731 if (!debug_locks_off()) 4732 return 0; 4733 if (debug_locks_silent) 4734 return 0; 4735 4736 pr_warn("\n"); 4737 pr_warn("=============================\n"); 4738 pr_warn("[ BUG: Invalid wait context ]\n"); 4739 print_kernel_ident(); 4740 pr_warn("-----------------------------\n"); 4741 4742 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 4743 print_lock(hlock); 4744 4745 pr_warn("other info that might help us debug this:\n"); 4746 4747 curr_inner = task_wait_context(curr); 4748 pr_warn("context-{%d:%d}\n", curr_inner, curr_inner); 4749 4750 lockdep_print_held_locks(curr); 4751 4752 pr_warn("stack backtrace:\n"); 4753 dump_stack(); 4754 4755 return 0; 4756 } 4757 4758 /* 4759 * Verify the wait_type context. 
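 *
 * (Rough intuition, using the predefined ordering
 * LD_WAIT_SPIN < LD_WAIT_CONFIG < LD_WAIT_SLEEP:
 *
 *	raw_spin_lock(&r);	// inner wait type LD_WAIT_SPIN
 *	mutex_lock(&m);		// LD_WAIT_SLEEP inside SPIN -> invalid
 *
 * whereas the reverse nesting, a raw spinlock inside a mutex, is
 * perfectly fine.)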
4760 * 4761 * This check validates we take locks in the right wait-type order; that is it 4762 * ensures that we do not take mutexes inside spinlocks and do not attempt to 4763 * acquire spinlocks inside raw_spinlocks and the sort. 4764 * 4765 * The entire thing is slightly more complex because of RCU, RCU is a lock that 4766 * can be taken from (pretty much) any context but also has constraints. 4767 * However when taken in a stricter environment the RCU lock does not loosen 4768 * the constraints. 4769 * 4770 * Therefore we must look for the strictest environment in the lock stack and 4771 * compare that to the lock we're trying to acquire. 4772 */ 4773 static int check_wait_context(struct task_struct *curr, struct held_lock *next) 4774 { 4775 u8 next_inner = hlock_class(next)->wait_type_inner; 4776 u8 next_outer = hlock_class(next)->wait_type_outer; 4777 u8 curr_inner; 4778 int depth; 4779 4780 if (!next_inner || next->trylock) 4781 return 0; 4782 4783 if (!next_outer) 4784 next_outer = next_inner; 4785 4786 /* 4787 * Find start of current irq_context.. 4788 */ 4789 for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) { 4790 struct held_lock *prev = curr->held_locks + depth; 4791 if (prev->irq_context != next->irq_context) 4792 break; 4793 } 4794 depth++; 4795 4796 curr_inner = task_wait_context(curr); 4797 4798 for (; depth < curr->lockdep_depth; depth++) { 4799 struct held_lock *prev = curr->held_locks + depth; 4800 u8 prev_inner = hlock_class(prev)->wait_type_inner; 4801 4802 if (prev_inner) { 4803 /* 4804 * We can have a bigger inner than a previous one 4805 * when outer is smaller than inner, as with RCU. 4806 * 4807 * Also due to trylocks. 4808 */ 4809 curr_inner = min(curr_inner, prev_inner); 4810 } 4811 } 4812 4813 if (next_outer > curr_inner) 4814 return print_lock_invalid_wait_context(curr, next); 4815 4816 return 0; 4817 } 4818 4819 #else /* CONFIG_PROVE_LOCKING */ 4820 4821 static inline int 4822 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) 4823 { 4824 return 1; 4825 } 4826 4827 static inline unsigned int task_irq_context(struct task_struct *task) 4828 { 4829 return 0; 4830 } 4831 4832 static inline int separate_irq_context(struct task_struct *curr, 4833 struct held_lock *hlock) 4834 { 4835 return 0; 4836 } 4837 4838 static inline int check_wait_context(struct task_struct *curr, 4839 struct held_lock *next) 4840 { 4841 return 0; 4842 } 4843 4844 #endif /* CONFIG_PROVE_LOCKING */ 4845 4846 /* 4847 * Initialize a lock instance's lock-class mapping info: 4848 */ 4849 void lockdep_init_map_type(struct lockdep_map *lock, const char *name, 4850 struct lock_class_key *key, int subclass, 4851 u8 inner, u8 outer, u8 lock_type) 4852 { 4853 int i; 4854 4855 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 4856 lock->class_cache[i] = NULL; 4857 4858 #ifdef CONFIG_LOCK_STAT 4859 lock->cpu = raw_smp_processor_id(); 4860 #endif 4861 4862 /* 4863 * Can't be having no nameless bastards around this place! 4864 */ 4865 if (DEBUG_LOCKS_WARN_ON(!name)) { 4866 lock->name = "NULL"; 4867 return; 4868 } 4869 4870 lock->name = name; 4871 4872 lock->wait_type_outer = outer; 4873 lock->wait_type_inner = inner; 4874 lock->lock_type = lock_type; 4875 4876 /* 4877 * No key, no joy, we need to hash something. 4878 */ 4879 if (DEBUG_LOCKS_WARN_ON(!key)) 4880 return; 4881 /* 4882 * Sanity check, the lock-class key must either have been allocated 4883 * statically or must have been registered as a dynamic key. 
4884 */
4885 if (!static_obj(key) && !is_dynamic_key(key)) {
4886 if (debug_locks)
4887 printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
4888 DEBUG_LOCKS_WARN_ON(1);
4889 return;
4890 }
4891 lock->key = key;
4892
4893 if (unlikely(!debug_locks))
4894 return;
4895
4896 if (subclass) {
4897 unsigned long flags;
4898
4899 if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
4900 return;
4901
4902 raw_local_irq_save(flags);
4903 lockdep_recursion_inc();
4904 register_lock_class(lock, subclass, 1);
4905 lockdep_recursion_finish();
4906 raw_local_irq_restore(flags);
4907 }
4908 }
4909 EXPORT_SYMBOL_GPL(lockdep_init_map_type);
4910
4911 struct lock_class_key __lockdep_no_validate__;
4912 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
4913
4914 #ifdef CONFIG_PROVE_LOCKING
4915 void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
4916 lock_print_fn print_fn)
4917 {
4918 struct lock_class *class = lock->class_cache[0];
4919 unsigned long flags;
4920
4921 raw_local_irq_save(flags);
4922 lockdep_recursion_inc();
4923
4924 if (!class)
4925 class = register_lock_class(lock, 0, 0);
4926
4927 if (class) {
4928 WARN_ON(class->cmp_fn && class->cmp_fn != cmp_fn);
4929 WARN_ON(class->print_fn && class->print_fn != print_fn);
4930
4931 class->cmp_fn = cmp_fn;
4932 class->print_fn = print_fn;
4933 }
4934
4935 lockdep_recursion_finish();
4936 raw_local_irq_restore(flags);
4937 }
4938 EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
4939 #endif
4940
4941 static void
4942 print_lock_nested_lock_not_held(struct task_struct *curr,
4943 struct held_lock *hlock)
4944 {
4945 if (!debug_locks_off())
4946 return;
4947 if (debug_locks_silent)
4948 return;
4949
4950 pr_warn("\n");
4951 pr_warn("==================================\n");
4952 pr_warn("WARNING: Nested lock was not taken\n");
4953 print_kernel_ident();
4954 pr_warn("----------------------------------\n");
4955
4956 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4957 print_lock(hlock);
4958
4959 pr_warn("\nbut this task is not holding:\n");
4960 pr_warn("%s\n", hlock->nest_lock->name);
4961
4965 pr_warn("\nother info that might help us debug this:\n");
4966 lockdep_print_held_locks(curr);
4967
4968 pr_warn("\nstack backtrace:\n");
4969 dump_stack();
4970 }
4971
4972 static int __lock_is_held(const struct lockdep_map *lock, int read);
4973
4974 /*
4975 * This gets called for every mutex_lock*()/spin_lock*() operation.
4976 * We maintain the dependency maps and validate the locking attempt:
4977 *
4978 * The callers must make sure that IRQs are disabled before calling it,
4979 * otherwise we could get an interrupt which would want to take locks,
4980 * which would end up in lockdep again.
4981 */
4982 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4983 int trylock, int read, int check, int hardirqs_off,
4984 struct lockdep_map *nest_lock, unsigned long ip,
4985 int references, int pin_count, int sync)
4986 {
4987 struct task_struct *curr = current;
4988 struct lock_class *class = NULL;
4989 struct held_lock *hlock;
4990 unsigned int depth;
4991 int chain_head = 0;
4992 int class_idx;
4993 u64 chain_key;
4994
4995 if (unlikely(!debug_locks))
4996 return 0;
4997
4998 if (!prove_locking || lock->key == &__lockdep_no_validate__)
4999 check = 0;
5000
5001 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
5002 class = lock->class_cache[subclass];
5003 /*
5004 * Not cached?
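 *
 * (The fast path above covers only the first
 * NR_LOCKDEP_CACHING_CLASSES subclasses; a miss, or a higher
 * subclass, falls through to the hash lookup inside
 * register_lock_class().)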
5005 */ 5006 if (unlikely(!class)) { 5007 class = register_lock_class(lock, subclass, 0); 5008 if (!class) 5009 return 0; 5010 } 5011 5012 debug_class_ops_inc(class); 5013 5014 if (very_verbose(class)) { 5015 printk("\nacquire class [%px] %s", class->key, class->name); 5016 if (class->name_version > 1) 5017 printk(KERN_CONT "#%d", class->name_version); 5018 printk(KERN_CONT "\n"); 5019 dump_stack(); 5020 } 5021 5022 /* 5023 * Add the lock to the list of currently held locks. 5024 * (we dont increase the depth just yet, up until the 5025 * dependency checks are done) 5026 */ 5027 depth = curr->lockdep_depth; 5028 /* 5029 * Ran out of static storage for our per-task lock stack again have we? 5030 */ 5031 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 5032 return 0; 5033 5034 class_idx = class - lock_classes; 5035 5036 if (depth && !sync) { 5037 /* we're holding locks and the new held lock is not a sync */ 5038 hlock = curr->held_locks + depth - 1; 5039 if (hlock->class_idx == class_idx && nest_lock) { 5040 if (!references) 5041 references++; 5042 5043 if (!hlock->references) 5044 hlock->references++; 5045 5046 hlock->references += references; 5047 5048 /* Overflow */ 5049 if (DEBUG_LOCKS_WARN_ON(hlock->references < references)) 5050 return 0; 5051 5052 return 2; 5053 } 5054 } 5055 5056 hlock = curr->held_locks + depth; 5057 /* 5058 * Plain impossible, we just registered it and checked it weren't no 5059 * NULL like.. I bet this mushroom I ate was good! 5060 */ 5061 if (DEBUG_LOCKS_WARN_ON(!class)) 5062 return 0; 5063 hlock->class_idx = class_idx; 5064 hlock->acquire_ip = ip; 5065 hlock->instance = lock; 5066 hlock->nest_lock = nest_lock; 5067 hlock->irq_context = task_irq_context(curr); 5068 hlock->trylock = trylock; 5069 hlock->read = read; 5070 hlock->check = check; 5071 hlock->sync = !!sync; 5072 hlock->hardirqs_off = !!hardirqs_off; 5073 hlock->references = references; 5074 #ifdef CONFIG_LOCK_STAT 5075 hlock->waittime_stamp = 0; 5076 hlock->holdtime_stamp = lockstat_clock(); 5077 #endif 5078 hlock->pin_count = pin_count; 5079 5080 if (check_wait_context(curr, hlock)) 5081 return 0; 5082 5083 /* Initialize the lock usage bit */ 5084 if (!mark_usage(curr, hlock, check)) 5085 return 0; 5086 5087 /* 5088 * Calculate the chain hash: it's the combined hash of all the 5089 * lock keys along the dependency chain. We save the hash value 5090 * at every step so that we can get the current hash easily 5091 * after unlock. The chain hash is then used to cache dependency 5092 * results. 5093 * 5094 * The 'key ID' is what is the most compact key value to drive 5095 * the hash, not class->key. 5096 */ 5097 /* 5098 * Whoops, we did it again.. class_idx is invalid. 5099 */ 5100 if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use))) 5101 return 0; 5102 5103 chain_key = curr->curr_chain_key; 5104 if (!depth) { 5105 /* 5106 * How can we have a chain hash when we ain't got no keys?! 
5107 */ 5108 if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY)) 5109 return 0; 5110 chain_head = 1; 5111 } 5112 5113 hlock->prev_chain_key = chain_key; 5114 if (separate_irq_context(curr, hlock)) { 5115 chain_key = INITIAL_CHAIN_KEY; 5116 chain_head = 1; 5117 } 5118 chain_key = iterate_chain_key(chain_key, hlock_id(hlock)); 5119 5120 if (nest_lock && !__lock_is_held(nest_lock, -1)) { 5121 print_lock_nested_lock_not_held(curr, hlock); 5122 return 0; 5123 } 5124 5125 if (!debug_locks_silent) { 5126 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); 5127 WARN_ON_ONCE(!hlock_class(hlock)->key); 5128 } 5129 5130 if (!validate_chain(curr, hlock, chain_head, chain_key)) 5131 return 0; 5132 5133 /* For lock_sync(), we are done here since no actual critical section */ 5134 if (hlock->sync) 5135 return 1; 5136 5137 curr->curr_chain_key = chain_key; 5138 curr->lockdep_depth++; 5139 check_chain_key(curr); 5140 #ifdef CONFIG_DEBUG_LOCKDEP 5141 if (unlikely(!debug_locks)) 5142 return 0; 5143 #endif 5144 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 5145 debug_locks_off(); 5146 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 5147 printk(KERN_DEBUG "depth: %i max: %lu!\n", 5148 curr->lockdep_depth, MAX_LOCK_DEPTH); 5149 5150 lockdep_print_held_locks(current); 5151 debug_show_all_locks(); 5152 dump_stack(); 5153 5154 return 0; 5155 } 5156 5157 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 5158 max_lockdep_depth = curr->lockdep_depth; 5159 5160 return 1; 5161 } 5162 5163 static void print_unlock_imbalance_bug(struct task_struct *curr, 5164 struct lockdep_map *lock, 5165 unsigned long ip) 5166 { 5167 if (!debug_locks_off()) 5168 return; 5169 if (debug_locks_silent) 5170 return; 5171 5172 pr_warn("\n"); 5173 pr_warn("=====================================\n"); 5174 pr_warn("WARNING: bad unlock balance detected!\n"); 5175 print_kernel_ident(); 5176 pr_warn("-------------------------------------\n"); 5177 pr_warn("%s/%d is trying to release lock (", 5178 curr->comm, task_pid_nr(curr)); 5179 print_lockdep_cache(lock); 5180 pr_cont(") at:\n"); 5181 print_ip_sym(KERN_WARNING, ip); 5182 pr_warn("but there are no more locks to release!\n"); 5183 pr_warn("\nother info that might help us debug this:\n"); 5184 lockdep_print_held_locks(curr); 5185 5186 pr_warn("\nstack backtrace:\n"); 5187 dump_stack(); 5188 } 5189 5190 static noinstr int match_held_lock(const struct held_lock *hlock, 5191 const struct lockdep_map *lock) 5192 { 5193 if (hlock->instance == lock) 5194 return 1; 5195 5196 if (hlock->references) { 5197 const struct lock_class *class = lock->class_cache[0]; 5198 5199 if (!class) 5200 class = look_up_lock_class(lock, 0); 5201 5202 /* 5203 * If look_up_lock_class() failed to find a class, we're trying 5204 * to test if we hold a lock that has never yet been acquired. 5205 * Clearly if the lock hasn't been acquired _ever_, we're not 5206 * holding it either, so report failure. 5207 */ 5208 if (!class) 5209 return 0; 5210 5211 /* 5212 * References, but not a lock we're actually ref-counting? 5213 * State got messed up, follow the sites that change ->references 5214 * and try to make sense of it. 
5215 */ 5216 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 5217 return 0; 5218 5219 if (hlock->class_idx == class - lock_classes) 5220 return 1; 5221 } 5222 5223 return 0; 5224 } 5225 5226 /* @depth must not be zero */ 5227 static struct held_lock *find_held_lock(struct task_struct *curr, 5228 struct lockdep_map *lock, 5229 unsigned int depth, int *idx) 5230 { 5231 struct held_lock *ret, *hlock, *prev_hlock; 5232 int i; 5233 5234 i = depth - 1; 5235 hlock = curr->held_locks + i; 5236 ret = hlock; 5237 if (match_held_lock(hlock, lock)) 5238 goto out; 5239 5240 ret = NULL; 5241 for (i--, prev_hlock = hlock--; 5242 i >= 0; 5243 i--, prev_hlock = hlock--) { 5244 /* 5245 * We must not cross into another context: 5246 */ 5247 if (prev_hlock->irq_context != hlock->irq_context) { 5248 ret = NULL; 5249 break; 5250 } 5251 if (match_held_lock(hlock, lock)) { 5252 ret = hlock; 5253 break; 5254 } 5255 } 5256 5257 out: 5258 *idx = i; 5259 return ret; 5260 } 5261 5262 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, 5263 int idx, unsigned int *merged) 5264 { 5265 struct held_lock *hlock; 5266 int first_idx = idx; 5267 5268 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 5269 return 0; 5270 5271 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { 5272 switch (__lock_acquire(hlock->instance, 5273 hlock_class(hlock)->subclass, 5274 hlock->trylock, 5275 hlock->read, hlock->check, 5276 hlock->hardirqs_off, 5277 hlock->nest_lock, hlock->acquire_ip, 5278 hlock->references, hlock->pin_count, 0)) { 5279 case 0: 5280 return 1; 5281 case 1: 5282 break; 5283 case 2: 5284 *merged += (idx == first_idx); 5285 break; 5286 default: 5287 WARN_ON(1); 5288 return 0; 5289 } 5290 } 5291 return 0; 5292 } 5293 5294 static int 5295 __lock_set_class(struct lockdep_map *lock, const char *name, 5296 struct lock_class_key *key, unsigned int subclass, 5297 unsigned long ip) 5298 { 5299 struct task_struct *curr = current; 5300 unsigned int depth, merged = 0; 5301 struct held_lock *hlock; 5302 struct lock_class *class; 5303 int i; 5304 5305 if (unlikely(!debug_locks)) 5306 return 0; 5307 5308 depth = curr->lockdep_depth; 5309 /* 5310 * This function is about (re)setting the class of a held lock, 5311 * yet we're not actually holding any locks. Naughty user! 5312 */ 5313 if (DEBUG_LOCKS_WARN_ON(!depth)) 5314 return 0; 5315 5316 hlock = find_held_lock(curr, lock, depth, &i); 5317 if (!hlock) { 5318 print_unlock_imbalance_bug(curr, lock, ip); 5319 return 0; 5320 } 5321 5322 lockdep_init_map_type(lock, name, key, 0, 5323 lock->wait_type_inner, 5324 lock->wait_type_outer, 5325 lock->lock_type); 5326 class = register_lock_class(lock, subclass, 0); 5327 hlock->class_idx = class - lock_classes; 5328 5329 curr->lockdep_depth = i; 5330 curr->curr_chain_key = hlock->prev_chain_key; 5331 5332 if (reacquire_held_locks(curr, depth, i, &merged)) 5333 return 0; 5334 5335 /* 5336 * I took it apart and put it back together again, except now I have 5337 * these 'spare' parts.. where shall I put them. 
5338 */ 5339 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) 5340 return 0; 5341 return 1; 5342 } 5343 5344 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) 5345 { 5346 struct task_struct *curr = current; 5347 unsigned int depth, merged = 0; 5348 struct held_lock *hlock; 5349 int i; 5350 5351 if (unlikely(!debug_locks)) 5352 return 0; 5353 5354 depth = curr->lockdep_depth; 5355 /* 5356 * This function is about (re)setting the class of a held lock, 5357 * yet we're not actually holding any locks. Naughty user! 5358 */ 5359 if (DEBUG_LOCKS_WARN_ON(!depth)) 5360 return 0; 5361 5362 hlock = find_held_lock(curr, lock, depth, &i); 5363 if (!hlock) { 5364 print_unlock_imbalance_bug(curr, lock, ip); 5365 return 0; 5366 } 5367 5368 curr->lockdep_depth = i; 5369 curr->curr_chain_key = hlock->prev_chain_key; 5370 5371 WARN(hlock->read, "downgrading a read lock"); 5372 hlock->read = 1; 5373 hlock->acquire_ip = ip; 5374 5375 if (reacquire_held_locks(curr, depth, i, &merged)) 5376 return 0; 5377 5378 /* Merging can't happen with unchanged classes.. */ 5379 if (DEBUG_LOCKS_WARN_ON(merged)) 5380 return 0; 5381 5382 /* 5383 * I took it apart and put it back together again, except now I have 5384 * these 'spare' parts.. where shall I put them. 5385 */ 5386 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 5387 return 0; 5388 5389 return 1; 5390 } 5391 5392 /* 5393 * Remove the lock from the list of currently held locks - this gets 5394 * called on mutex_unlock()/spin_unlock*() (or on a failed 5395 * mutex_lock_interruptible()). 5396 */ 5397 static int 5398 __lock_release(struct lockdep_map *lock, unsigned long ip) 5399 { 5400 struct task_struct *curr = current; 5401 unsigned int depth, merged = 1; 5402 struct held_lock *hlock; 5403 int i; 5404 5405 if (unlikely(!debug_locks)) 5406 return 0; 5407 5408 depth = curr->lockdep_depth; 5409 /* 5410 * So we're all set to release this lock.. wait what lock? We don't 5411 * own any locks, you've been drinking again? 5412 */ 5413 if (depth <= 0) { 5414 print_unlock_imbalance_bug(curr, lock, ip); 5415 return 0; 5416 } 5417 5418 /* 5419 * Check whether the lock exists in the current stack 5420 * of held locks: 5421 */ 5422 hlock = find_held_lock(curr, lock, depth, &i); 5423 if (!hlock) { 5424 print_unlock_imbalance_bug(curr, lock, ip); 5425 return 0; 5426 } 5427 5428 if (hlock->instance == lock) 5429 lock_release_holdtime(hlock); 5430 5431 WARN(hlock->pin_count, "releasing a pinned lock\n"); 5432 5433 if (hlock->references) { 5434 hlock->references--; 5435 if (hlock->references) { 5436 /* 5437 * We had, and after removing one, still have 5438 * references, the current lock stack is still 5439 * valid. We're done! 5440 */ 5441 return 1; 5442 } 5443 } 5444 5445 /* 5446 * We have the right lock to unlock, 'hlock' points to it. 5447 * Now we remove it from the stack, and add back the other 5448 * entries (if any), recalculating the hash along the way: 5449 */ 5450 5451 curr->lockdep_depth = i; 5452 curr->curr_chain_key = hlock->prev_chain_key; 5453 5454 /* 5455 * The most likely case is when the unlock is on the innermost 5456 * lock. In this case, we are done! 5457 */ 5458 if (i == depth-1) 5459 return 1; 5460 5461 if (reacquire_held_locks(curr, depth, i + 1, &merged)) 5462 return 0; 5463 5464 /* 5465 * We had N bottles of beer on the wall, we drank one, but now 5466 * there's not N-1 bottles of beer left on the wall... 5467 * Pouring two of the bottles together is acceptable. 
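 *
 * (Concretely: 'merged' starts out as 1 for the entry removed here,
 * and reacquire_held_locks() bumps it if a re-added entry folded
 * into the ->references of the lock below it, so 'depth - merged'
 * is the expected depth either way.)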
5468 */ 5469 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); 5470 5471 /* 5472 * Since reacquire_held_locks() would have called check_chain_key() 5473 * indirectly via __lock_acquire(), we don't need to do it again 5474 * on return. 5475 */ 5476 return 0; 5477 } 5478 5479 static __always_inline 5480 int __lock_is_held(const struct lockdep_map *lock, int read) 5481 { 5482 struct task_struct *curr = current; 5483 int i; 5484 5485 for (i = 0; i < curr->lockdep_depth; i++) { 5486 struct held_lock *hlock = curr->held_locks + i; 5487 5488 if (match_held_lock(hlock, lock)) { 5489 if (read == -1 || !!hlock->read == read) 5490 return LOCK_STATE_HELD; 5491 5492 return LOCK_STATE_NOT_HELD; 5493 } 5494 } 5495 5496 return LOCK_STATE_NOT_HELD; 5497 } 5498 5499 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) 5500 { 5501 struct pin_cookie cookie = NIL_COOKIE; 5502 struct task_struct *curr = current; 5503 int i; 5504 5505 if (unlikely(!debug_locks)) 5506 return cookie; 5507 5508 for (i = 0; i < curr->lockdep_depth; i++) { 5509 struct held_lock *hlock = curr->held_locks + i; 5510 5511 if (match_held_lock(hlock, lock)) { 5512 /* 5513 * Grab 16bits of randomness; this is sufficient to not 5514 * be guessable and still allows some pin nesting in 5515 * our u32 pin_count. 5516 */ 5517 cookie.val = 1 + (sched_clock() & 0xffff); 5518 hlock->pin_count += cookie.val; 5519 return cookie; 5520 } 5521 } 5522 5523 WARN(1, "pinning an unheld lock\n"); 5524 return cookie; 5525 } 5526 5527 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5528 { 5529 struct task_struct *curr = current; 5530 int i; 5531 5532 if (unlikely(!debug_locks)) 5533 return; 5534 5535 for (i = 0; i < curr->lockdep_depth; i++) { 5536 struct held_lock *hlock = curr->held_locks + i; 5537 5538 if (match_held_lock(hlock, lock)) { 5539 hlock->pin_count += cookie.val; 5540 return; 5541 } 5542 } 5543 5544 WARN(1, "pinning an unheld lock\n"); 5545 } 5546 5547 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5548 { 5549 struct task_struct *curr = current; 5550 int i; 5551 5552 if (unlikely(!debug_locks)) 5553 return; 5554 5555 for (i = 0; i < curr->lockdep_depth; i++) { 5556 struct held_lock *hlock = curr->held_locks + i; 5557 5558 if (match_held_lock(hlock, lock)) { 5559 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 5560 return; 5561 5562 hlock->pin_count -= cookie.val; 5563 5564 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n")) 5565 hlock->pin_count = 0; 5566 5567 return; 5568 } 5569 } 5570 5571 WARN(1, "unpinning an unheld lock\n"); 5572 } 5573 5574 /* 5575 * Check whether we follow the irq-flags state precisely: 5576 */ 5577 static noinstr void check_flags(unsigned long flags) 5578 { 5579 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) 5580 if (!debug_locks) 5581 return; 5582 5583 /* Get the warning out.. */ 5584 instrumentation_begin(); 5585 5586 if (irqs_disabled_flags(flags)) { 5587 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) { 5588 printk("possible reason: unannotated irqs-off.\n"); 5589 } 5590 } else { 5591 if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) { 5592 printk("possible reason: unannotated irqs-on.\n"); 5593 } 5594 } 5595 5596 #ifndef CONFIG_PREEMPT_RT 5597 /* 5598 * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only
5600 * check if not in hardirq contexts:
5601 */
5602 if (!hardirq_count()) {
5603 if (softirq_count()) {
5604 /* like the above, but with softirqs */
5605 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
5606 } else {
5607 /* lick the above, does it taste good? */
5608 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
5609 }
5610 }
5611 #endif
5612
5613 if (!debug_locks)
5614 print_irqtrace_events(current);
5615
5616 instrumentation_end();
5617 #endif
5618 }
5619
5620 void lock_set_class(struct lockdep_map *lock, const char *name,
5621 struct lock_class_key *key, unsigned int subclass,
5622 unsigned long ip)
5623 {
5624 unsigned long flags;
5625
5626 if (unlikely(!lockdep_enabled()))
5627 return;
5628
5629 raw_local_irq_save(flags);
5630 lockdep_recursion_inc();
5631 check_flags(flags);
5632 if (__lock_set_class(lock, name, key, subclass, ip))
5633 check_chain_key(current);
5634 lockdep_recursion_finish();
5635 raw_local_irq_restore(flags);
5636 }
5637 EXPORT_SYMBOL_GPL(lock_set_class);
5638
5639 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5640 {
5641 unsigned long flags;
5642
5643 if (unlikely(!lockdep_enabled()))
5644 return;
5645
5646 raw_local_irq_save(flags);
5647 lockdep_recursion_inc();
5648 check_flags(flags);
5649 if (__lock_downgrade(lock, ip))
5650 check_chain_key(current);
5651 lockdep_recursion_finish();
5652 raw_local_irq_restore(flags);
5653 }
5654 EXPORT_SYMBOL_GPL(lock_downgrade);
5655
5656 /* NMI context !!! */
5657 static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5658 {
5659 #ifdef CONFIG_PROVE_LOCKING
5660 struct lock_class *class = look_up_lock_class(lock, subclass);
5661 unsigned long mask = LOCKF_USED;
5662
5663 /* if it doesn't have a class (yet), it certainly hasn't been used yet */
5664 if (!class)
5665 return;
5666
5667 /*
5668 * READ locks only conflict with USED, such that if we only ever use
5669 * READ locks, there is no deadlock possible -- RCU.
5670 */
5671 if (!hlock->read)
5672 mask |= LOCKF_USED_READ;
5673
5674 if (!(class->usage_mask & mask))
5675 return;
5676
5677 hlock->class_idx = class - lock_classes;
5678
5679 print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5680 #endif
5681 }
5682
5683 static bool lockdep_nmi(void)
5684 {
5685 if (raw_cpu_read(lockdep_recursion))
5686 return false;
5687
5688 if (!in_nmi())
5689 return false;
5690
5691 return true;
5692 }
5693
5694 /*
5695 * read_lock() is recursive if:
5696 * 1. We force lockdep to think this way in selftests or
5697 * 2. The implementation is not queued read/write lock or
5698 * 3. The locker is at an in_interrupt() context.
5699 */
5700 bool read_lock_is_recursive(void)
5701 {
5702 return force_read_lock_recursive ||
5703 !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5704 in_interrupt();
5705 }
5706 EXPORT_SYMBOL_GPL(read_lock_is_recursive);
5707
5708 /*
5709 * We are not always called with irqs disabled - do that here,
5710 * and also avoid lockdep recursion:
5711 */
5712 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
5713 int trylock, int read, int check,
5714 struct lockdep_map *nest_lock, unsigned long ip)
5715 {
5716 unsigned long flags;
5717
5718 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5719
5720 if (!debug_locks)
5721 return;
5722
5723 if (unlikely(!lockdep_enabled())) {
5724 /* XXX allow trylock from NMI ?!?
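 *
 * (Note the asymmetry below: only the !trylock case gets even the
 * minimal verify_lock_unused() check; a trylock from NMI is
 * currently not validated at all.)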
*/
5725 if (lockdep_nmi() && !trylock) {
5726 struct held_lock hlock;
5727
5728 hlock.acquire_ip = ip;
5729 hlock.instance = lock;
5730 hlock.nest_lock = nest_lock;
5731 hlock.irq_context = 2; // XXX
5732 hlock.trylock = trylock;
5733 hlock.read = read;
5734 hlock.check = check;
5735 hlock.hardirqs_off = true;
5736 hlock.references = 0;
5737
5738 verify_lock_unused(lock, &hlock, subclass);
5739 }
5740 return;
5741 }
5742
5743 raw_local_irq_save(flags);
5744 check_flags(flags);
5745
5746 lockdep_recursion_inc();
5747 __lock_acquire(lock, subclass, trylock, read, check,
5748 irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0);
5749 lockdep_recursion_finish();
5750 raw_local_irq_restore(flags);
5751 }
5752 EXPORT_SYMBOL_GPL(lock_acquire);
5753
5754 void lock_release(struct lockdep_map *lock, unsigned long ip)
5755 {
5756 unsigned long flags;
5757
5758 trace_lock_release(lock, ip);
5759
5760 if (unlikely(!lockdep_enabled()))
5761 return;
5762
5763 raw_local_irq_save(flags);
5764 check_flags(flags);
5765
5766 lockdep_recursion_inc();
5767 if (__lock_release(lock, ip))
5768 check_chain_key(current);
5769 lockdep_recursion_finish();
5770 raw_local_irq_restore(flags);
5771 }
5772 EXPORT_SYMBOL_GPL(lock_release);
5773
5774 /*
5775 * lock_sync() - A special annotation for synchronize_{s,}rcu()-like API.
5776 *
5777 * No actual critical section is created by the APIs annotated with this: these
5778 * APIs are used to wait for one or multiple critical sections (on other CPUs
5779 * or threads), and it means that calling these APIs inside these critical
5780 * sections is a potential deadlock.
5781 */
5782 void lock_sync(struct lockdep_map *lock, unsigned subclass, int read,
5783 int check, struct lockdep_map *nest_lock, unsigned long ip)
5784 {
5785 unsigned long flags;
5786
5787 if (unlikely(!lockdep_enabled()))
5788 return;
5789
5790 raw_local_irq_save(flags);
5791 check_flags(flags);
5792
5793 lockdep_recursion_inc();
5794 __lock_acquire(lock, subclass, 0, read, check,
5795 irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 1);
5796 check_chain_key(current);
5797 lockdep_recursion_finish();
5798 raw_local_irq_restore(flags);
5799 }
5800 EXPORT_SYMBOL_GPL(lock_sync);
5801
5802 noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
5803 {
5804 unsigned long flags;
5805 int ret = LOCK_STATE_NOT_HELD;
5806
5807 /*
5808 * Avoid false negative lockdep_assert_held() and
5809 * lockdep_assert_not_held().
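 *
 * (E.g. with lockdep disabled, a sketch -- 'ctx' is hypothetical:
 *
 *	lockdep_assert_held(&ctx->lock);
 *
 * must not splat just because the state can no longer be proven;
 * LOCK_STATE_UNKNOWN lets both the held and the not-held assertion
 * pass in that case.)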
5810 */ 5811 if (unlikely(!lockdep_enabled())) 5812 return LOCK_STATE_UNKNOWN; 5813 5814 raw_local_irq_save(flags); 5815 check_flags(flags); 5816 5817 lockdep_recursion_inc(); 5818 ret = __lock_is_held(lock, read); 5819 lockdep_recursion_finish(); 5820 raw_local_irq_restore(flags); 5821 5822 return ret; 5823 } 5824 EXPORT_SYMBOL_GPL(lock_is_held_type); 5825 NOKPROBE_SYMBOL(lock_is_held_type); 5826 5827 struct pin_cookie lock_pin_lock(struct lockdep_map *lock) 5828 { 5829 struct pin_cookie cookie = NIL_COOKIE; 5830 unsigned long flags; 5831 5832 if (unlikely(!lockdep_enabled())) 5833 return cookie; 5834 5835 raw_local_irq_save(flags); 5836 check_flags(flags); 5837 5838 lockdep_recursion_inc(); 5839 cookie = __lock_pin_lock(lock); 5840 lockdep_recursion_finish(); 5841 raw_local_irq_restore(flags); 5842 5843 return cookie; 5844 } 5845 EXPORT_SYMBOL_GPL(lock_pin_lock); 5846 5847 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5848 { 5849 unsigned long flags; 5850 5851 if (unlikely(!lockdep_enabled())) 5852 return; 5853 5854 raw_local_irq_save(flags); 5855 check_flags(flags); 5856 5857 lockdep_recursion_inc(); 5858 __lock_repin_lock(lock, cookie); 5859 lockdep_recursion_finish(); 5860 raw_local_irq_restore(flags); 5861 } 5862 EXPORT_SYMBOL_GPL(lock_repin_lock); 5863 5864 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 5865 { 5866 unsigned long flags; 5867 5868 if (unlikely(!lockdep_enabled())) 5869 return; 5870 5871 raw_local_irq_save(flags); 5872 check_flags(flags); 5873 5874 lockdep_recursion_inc(); 5875 __lock_unpin_lock(lock, cookie); 5876 lockdep_recursion_finish(); 5877 raw_local_irq_restore(flags); 5878 } 5879 EXPORT_SYMBOL_GPL(lock_unpin_lock); 5880 5881 #ifdef CONFIG_LOCK_STAT 5882 static void print_lock_contention_bug(struct task_struct *curr, 5883 struct lockdep_map *lock, 5884 unsigned long ip) 5885 { 5886 if (!debug_locks_off()) 5887 return; 5888 if (debug_locks_silent) 5889 return; 5890 5891 pr_warn("\n"); 5892 pr_warn("=================================\n"); 5893 pr_warn("WARNING: bad contention detected!\n"); 5894 print_kernel_ident(); 5895 pr_warn("---------------------------------\n"); 5896 pr_warn("%s/%d is trying to contend lock (", 5897 curr->comm, task_pid_nr(curr)); 5898 print_lockdep_cache(lock); 5899 pr_cont(") at:\n"); 5900 print_ip_sym(KERN_WARNING, ip); 5901 pr_warn("but there are no locks held!\n"); 5902 pr_warn("\nother info that might help us debug this:\n"); 5903 lockdep_print_held_locks(curr); 5904 5905 pr_warn("\nstack backtrace:\n"); 5906 dump_stack(); 5907 } 5908 5909 static void 5910 __lock_contended(struct lockdep_map *lock, unsigned long ip) 5911 { 5912 struct task_struct *curr = current; 5913 struct held_lock *hlock; 5914 struct lock_class_stats *stats; 5915 unsigned int depth; 5916 int i, contention_point, contending_point; 5917 5918 depth = curr->lockdep_depth; 5919 /* 5920 * Whee, we contended on this lock, except it seems we're not 5921 * actually trying to acquire anything much at all.. 
5922 */ 5923 if (DEBUG_LOCKS_WARN_ON(!depth)) 5924 return; 5925 5926 hlock = find_held_lock(curr, lock, depth, &i); 5927 if (!hlock) { 5928 print_lock_contention_bug(curr, lock, ip); 5929 return; 5930 } 5931 5932 if (hlock->instance != lock) 5933 return; 5934 5935 hlock->waittime_stamp = lockstat_clock(); 5936 5937 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 5938 contending_point = lock_point(hlock_class(hlock)->contending_point, 5939 lock->ip); 5940 5941 stats = get_lock_stats(hlock_class(hlock)); 5942 if (contention_point < LOCKSTAT_POINTS) 5943 stats->contention_point[contention_point]++; 5944 if (contending_point < LOCKSTAT_POINTS) 5945 stats->contending_point[contending_point]++; 5946 if (lock->cpu != smp_processor_id()) 5947 stats->bounces[bounce_contended + !!hlock->read]++; 5948 } 5949 5950 static void 5951 __lock_acquired(struct lockdep_map *lock, unsigned long ip) 5952 { 5953 struct task_struct *curr = current; 5954 struct held_lock *hlock; 5955 struct lock_class_stats *stats; 5956 unsigned int depth; 5957 u64 now, waittime = 0; 5958 int i, cpu; 5959 5960 depth = curr->lockdep_depth; 5961 /* 5962 * Yay, we acquired ownership of this lock we didn't try to 5963 * acquire, how the heck did that happen? 5964 */ 5965 if (DEBUG_LOCKS_WARN_ON(!depth)) 5966 return; 5967 5968 hlock = find_held_lock(curr, lock, depth, &i); 5969 if (!hlock) { 5970 print_lock_contention_bug(curr, lock, _RET_IP_); 5971 return; 5972 } 5973 5974 if (hlock->instance != lock) 5975 return; 5976 5977 cpu = smp_processor_id(); 5978 if (hlock->waittime_stamp) { 5979 now = lockstat_clock(); 5980 waittime = now - hlock->waittime_stamp; 5981 hlock->holdtime_stamp = now; 5982 } 5983 5984 stats = get_lock_stats(hlock_class(hlock)); 5985 if (waittime) { 5986 if (hlock->read) 5987 lock_time_inc(&stats->read_waittime, waittime); 5988 else 5989 lock_time_inc(&stats->write_waittime, waittime); 5990 } 5991 if (lock->cpu != cpu) 5992 stats->bounces[bounce_acquired + !!hlock->read]++; 5993 5994 lock->cpu = cpu; 5995 lock->ip = ip; 5996 } 5997 5998 void lock_contended(struct lockdep_map *lock, unsigned long ip) 5999 { 6000 unsigned long flags; 6001 6002 trace_lock_contended(lock, ip); 6003 6004 if (unlikely(!lock_stat || !lockdep_enabled())) 6005 return; 6006 6007 raw_local_irq_save(flags); 6008 check_flags(flags); 6009 lockdep_recursion_inc(); 6010 __lock_contended(lock, ip); 6011 lockdep_recursion_finish(); 6012 raw_local_irq_restore(flags); 6013 } 6014 EXPORT_SYMBOL_GPL(lock_contended); 6015 6016 void lock_acquired(struct lockdep_map *lock, unsigned long ip) 6017 { 6018 unsigned long flags; 6019 6020 trace_lock_acquired(lock, ip); 6021 6022 if (unlikely(!lock_stat || !lockdep_enabled())) 6023 return; 6024 6025 raw_local_irq_save(flags); 6026 check_flags(flags); 6027 lockdep_recursion_inc(); 6028 __lock_acquired(lock, ip); 6029 lockdep_recursion_finish(); 6030 raw_local_irq_restore(flags); 6031 } 6032 EXPORT_SYMBOL_GPL(lock_acquired); 6033 #endif 6034 6035 /* 6036 * Used by the testsuite, sanitize the validator state 6037 * after a simulated failure: 6038 */ 6039 6040 void lockdep_reset(void) 6041 { 6042 unsigned long flags; 6043 int i; 6044 6045 raw_local_irq_save(flags); 6046 lockdep_init_task(current); 6047 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 6048 nr_hardirq_chains = 0; 6049 nr_softirq_chains = 0; 6050 nr_process_chains = 0; 6051 debug_locks = 1; 6052 for (i = 0; i < CHAINHASH_SIZE; i++) 6053 INIT_HLIST_HEAD(chainhash_table + i); 6054 
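	/*
	 * All chain hash buckets are now empty again, consistent with the
	 * chain counters cleared above.
	 */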
raw_local_irq_restore(flags); 6055 } 6056 6057 /* Remove a class from a lock chain. Must be called with the graph lock held. */ 6058 static void remove_class_from_lock_chain(struct pending_free *pf, 6059 struct lock_chain *chain, 6060 struct lock_class *class) 6061 { 6062 #ifdef CONFIG_PROVE_LOCKING 6063 int i; 6064 6065 for (i = chain->base; i < chain->base + chain->depth; i++) { 6066 if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes) 6067 continue; 6068 /* 6069 * Each lock class occurs at most once in a lock chain so once 6070 * we found a match we can break out of this loop. 6071 */ 6072 goto free_lock_chain; 6073 } 6074 /* Since the chain has not been modified, return. */ 6075 return; 6076 6077 free_lock_chain: 6078 free_chain_hlocks(chain->base, chain->depth); 6079 /* Overwrite the chain key for concurrent RCU readers. */ 6080 WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY); 6081 dec_chains(chain->irq_context); 6082 6083 /* 6084 * Note: calling hlist_del_rcu() from inside a 6085 * hlist_for_each_entry_rcu() loop is safe. 6086 */ 6087 hlist_del_rcu(&chain->entry); 6088 __set_bit(chain - lock_chains, pf->lock_chains_being_freed); 6089 nr_zapped_lock_chains++; 6090 #endif 6091 } 6092 6093 /* Must be called with the graph lock held. */ 6094 static void remove_class_from_lock_chains(struct pending_free *pf, 6095 struct lock_class *class) 6096 { 6097 struct lock_chain *chain; 6098 struct hlist_head *head; 6099 int i; 6100 6101 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { 6102 head = chainhash_table + i; 6103 hlist_for_each_entry_rcu(chain, head, entry) { 6104 remove_class_from_lock_chain(pf, chain, class); 6105 } 6106 } 6107 } 6108 6109 /* 6110 * Remove all references to a lock class. The caller must hold the graph lock. 6111 */ 6112 static void zap_class(struct pending_free *pf, struct lock_class *class) 6113 { 6114 struct lock_list *entry; 6115 int i; 6116 6117 WARN_ON_ONCE(!class->key); 6118 6119 /* 6120 * Remove all dependencies this lock is 6121 * involved in: 6122 */ 6123 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { 6124 entry = list_entries + i; 6125 if (entry->class != class && entry->links_to != class) 6126 continue; 6127 __clear_bit(i, list_entries_in_use); 6128 nr_list_entries--; 6129 list_del_rcu(&entry->entry); 6130 } 6131 if (list_empty(&class->locks_after) && 6132 list_empty(&class->locks_before)) { 6133 list_move_tail(&class->lock_entry, &pf->zapped); 6134 hlist_del_rcu(&class->hash_entry); 6135 WRITE_ONCE(class->key, NULL); 6136 WRITE_ONCE(class->name, NULL); 6137 nr_lock_classes--; 6138 __clear_bit(class - lock_classes, lock_classes_in_use); 6139 if (class - lock_classes == max_lock_class_idx) 6140 max_lock_class_idx--; 6141 } else { 6142 WARN_ONCE(true, "%s() failed for class %s\n", __func__, 6143 class->name); 6144 } 6145 6146 remove_class_from_lock_chains(pf, class); 6147 nr_zapped_classes++; 6148 } 6149 6150 static void reinit_class(struct lock_class *class) 6151 { 6152 WARN_ON_ONCE(!class->lock_entry.next); 6153 WARN_ON_ONCE(!list_empty(&class->locks_after)); 6154 WARN_ON_ONCE(!list_empty(&class->locks_before)); 6155 memset_startat(class, 0, key); 6156 WARN_ON_ONCE(!class->lock_entry.next); 6157 WARN_ON_ONCE(!list_empty(&class->locks_after)); 6158 WARN_ON_ONCE(!list_empty(&class->locks_before)); 6159 } 6160 6161 static inline int within(const void *addr, void *start, unsigned long size) 6162 { 6163 return addr >= start && addr < start + size; 6164 } 6165 6166 static bool inside_selftest(void) 6167 { 6168 return current 
== lockdep_selftest_task_struct; 6169 } 6170 6171 /* The caller must hold the graph lock. */ 6172 static struct pending_free *get_pending_free(void) 6173 { 6174 return delayed_free.pf + delayed_free.index; 6175 } 6176 6177 static void free_zapped_rcu(struct rcu_head *ch); 6178 6179 /* 6180 * Schedule an RCU callback if no RCU callback is pending. Must be called with 6181 * the graph lock held. 6182 */ 6183 static void call_rcu_zapped(struct pending_free *pf) 6184 { 6185 WARN_ON_ONCE(inside_selftest()); 6186 6187 if (list_empty(&pf->zapped)) 6188 return; 6189 6190 if (delayed_free.scheduled) 6191 return; 6192 6193 delayed_free.scheduled = true; 6194 6195 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); 6196 delayed_free.index ^= 1; 6197 6198 call_rcu(&delayed_free.rcu_head, free_zapped_rcu); 6199 } 6200 6201 /* The caller must hold the graph lock. May be called from RCU context. */ 6202 static void __free_zapped_classes(struct pending_free *pf) 6203 { 6204 struct lock_class *class; 6205 6206 check_data_structures(); 6207 6208 list_for_each_entry(class, &pf->zapped, lock_entry) 6209 reinit_class(class); 6210 6211 list_splice_init(&pf->zapped, &free_lock_classes); 6212 6213 #ifdef CONFIG_PROVE_LOCKING 6214 bitmap_andnot(lock_chains_in_use, lock_chains_in_use, 6215 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); 6216 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); 6217 #endif 6218 } 6219 6220 static void free_zapped_rcu(struct rcu_head *ch) 6221 { 6222 struct pending_free *pf; 6223 unsigned long flags; 6224 6225 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) 6226 return; 6227 6228 raw_local_irq_save(flags); 6229 lockdep_lock(); 6230 6231 /* closed head */ 6232 pf = delayed_free.pf + (delayed_free.index ^ 1); 6233 __free_zapped_classes(pf); 6234 delayed_free.scheduled = false; 6235 6236 /* 6237 * If there's anything on the open list, close and start a new callback. 6238 */ 6239 call_rcu_zapped(delayed_free.pf + delayed_free.index); 6240 6241 lockdep_unlock(); 6242 raw_local_irq_restore(flags); 6243 } 6244 6245 /* 6246 * Remove all lock classes from the class hash table and from the 6247 * all_lock_classes list whose key or name is in the address range [start, 6248 * start + size). Move these lock classes to the zapped_classes list. Must 6249 * be called with the graph lock held. 6250 */ 6251 static void __lockdep_free_key_range(struct pending_free *pf, void *start, 6252 unsigned long size) 6253 { 6254 struct lock_class *class; 6255 struct hlist_head *head; 6256 int i; 6257 6258 /* Unhash all classes that were created by a module. */ 6259 for (i = 0; i < CLASSHASH_SIZE; i++) { 6260 head = classhash_table + i; 6261 hlist_for_each_entry_rcu(class, head, hash_entry) { 6262 if (!within(class->key, start, size) && 6263 !within(class->name, start, size)) 6264 continue; 6265 zap_class(pf, class); 6266 } 6267 } 6268 } 6269 6270 /* 6271 * Used in module.c to remove lock classes from memory that is going to be 6272 * freed, and possibly re-used by other modules. 6273 * 6274 * We will have had one synchronize_rcu() before getting here, so we're 6275 * guaranteed nobody will look up these exact classes -- they're properly dead 6276 * but still allocated.
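 *
 * Roughly, a teardown path is expected to look like the sketch below
 * ('base' and 'size' being hypothetical stand-ins for the module region
 * that is about to be released):
 *
 *	synchronize_rcu();			// the guarantee mentioned above
 *	lockdep_free_key_range(base, size);	// zap classes keyed in the range
 *	vfree(base);				// only now hand the memory back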
6277 */ 6278 static void lockdep_free_key_range_reg(void *start, unsigned long size) 6279 { 6280 struct pending_free *pf; 6281 unsigned long flags; 6282 6283 init_data_structures_once(); 6284 6285 raw_local_irq_save(flags); 6286 lockdep_lock(); 6287 pf = get_pending_free(); 6288 __lockdep_free_key_range(pf, start, size); 6289 call_rcu_zapped(pf); 6290 lockdep_unlock(); 6291 raw_local_irq_restore(flags); 6292 6293 /* 6294 * Wait for any possible iterators from look_up_lock_class() to pass 6295 * before continuing to free the memory they refer to. 6296 */ 6297 synchronize_rcu(); 6298 } 6299 6300 /* 6301 * Free all lockdep keys in the range [start, start+size). Does not sleep. 6302 * Ignores debug_locks. Must only be used by the lockdep selftests. 6303 */ 6304 static void lockdep_free_key_range_imm(void *start, unsigned long size) 6305 { 6306 struct pending_free *pf = delayed_free.pf; 6307 unsigned long flags; 6308 6309 init_data_structures_once(); 6310 6311 raw_local_irq_save(flags); 6312 lockdep_lock(); 6313 __lockdep_free_key_range(pf, start, size); 6314 __free_zapped_classes(pf); 6315 lockdep_unlock(); 6316 raw_local_irq_restore(flags); 6317 } 6318 6319 void lockdep_free_key_range(void *start, unsigned long size) 6320 { 6321 init_data_structures_once(); 6322 6323 if (inside_selftest()) 6324 lockdep_free_key_range_imm(start, size); 6325 else 6326 lockdep_free_key_range_reg(start, size); 6327 } 6328 6329 /* 6330 * Check whether any element of the @lock->class_cache[] array refers to a 6331 * registered lock class. The caller must hold either the graph lock or the 6332 * RCU read lock. 6333 */ 6334 static bool lock_class_cache_is_registered(struct lockdep_map *lock) 6335 { 6336 struct lock_class *class; 6337 struct hlist_head *head; 6338 int i, j; 6339 6340 for (i = 0; i < CLASSHASH_SIZE; i++) { 6341 head = classhash_table + i; 6342 hlist_for_each_entry_rcu(class, head, hash_entry) { 6343 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 6344 if (lock->class_cache[j] == class) 6345 return true; 6346 } 6347 } 6348 return false; 6349 } 6350 6351 /* The caller must hold the graph lock. Does not sleep. */ 6352 static void __lockdep_reset_lock(struct pending_free *pf, 6353 struct lockdep_map *lock) 6354 { 6355 struct lock_class *class; 6356 int j; 6357 6358 /* 6359 * Remove all classes this lock might have: 6360 */ 6361 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { 6362 /* 6363 * If the class exists we look it up and zap it: 6364 */ 6365 class = look_up_lock_class(lock, j); 6366 if (class) 6367 zap_class(pf, class); 6368 } 6369 /* 6370 * Debug check: in the end all mapped classes should 6371 * be gone. 6372 */ 6373 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock))) 6374 debug_locks_off(); 6375 } 6376 6377 /* 6378 * Remove all information lockdep has about a lock if debug_locks == 1. Free 6379 * released data structures from RCU context. 6380 */ 6381 static void lockdep_reset_lock_reg(struct lockdep_map *lock) 6382 { 6383 struct pending_free *pf; 6384 unsigned long flags; 6385 int locked; 6386 6387 raw_local_irq_save(flags); 6388 locked = graph_lock(); 6389 if (!locked) 6390 goto out_irq; 6391 6392 pf = get_pending_free(); 6393 __lockdep_reset_lock(pf, lock); 6394 call_rcu_zapped(pf); 6395 6396 graph_unlock(); 6397 out_irq: 6398 raw_local_irq_restore(flags); 6399 } 6400 6401 /* 6402 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the 6403 * lockdep selftests. 
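 *
 * Unlike lockdep_reset_lock_reg(), the zapped classes are freed
 * synchronously via __free_zapped_classes() rather than from a deferred
 * RCU callback; that is only safe because nothing runs concurrently with
 * the selftests.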
6404 */ 6405 static void lockdep_reset_lock_imm(struct lockdep_map *lock) 6406 { 6407 struct pending_free *pf = delayed_free.pf; 6408 unsigned long flags; 6409 6410 raw_local_irq_save(flags); 6411 lockdep_lock(); 6412 __lockdep_reset_lock(pf, lock); 6413 __free_zapped_classes(pf); 6414 lockdep_unlock(); 6415 raw_local_irq_restore(flags); 6416 } 6417 6418 void lockdep_reset_lock(struct lockdep_map *lock) 6419 { 6420 init_data_structures_once(); 6421 6422 if (inside_selftest()) 6423 lockdep_reset_lock_imm(lock); 6424 else 6425 lockdep_reset_lock_reg(lock); 6426 } 6427 6428 /* 6429 * Unregister a dynamically allocated key. 6430 * 6431 * Unlike lockdep_register_key(), a search is always done to find a matching 6432 * key irrespective of debug_locks to avoid potential invalid access to freed 6433 * memory in lock_class entry. 6434 */ 6435 void lockdep_unregister_key(struct lock_class_key *key) 6436 { 6437 struct hlist_head *hash_head = keyhashentry(key); 6438 struct lock_class_key *k; 6439 struct pending_free *pf; 6440 unsigned long flags; 6441 bool found = false; 6442 6443 might_sleep(); 6444 6445 if (WARN_ON_ONCE(static_obj(key))) 6446 return; 6447 6448 raw_local_irq_save(flags); 6449 lockdep_lock(); 6450 6451 hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 6452 if (k == key) { 6453 hlist_del_rcu(&k->hash_entry); 6454 found = true; 6455 break; 6456 } 6457 } 6458 WARN_ON_ONCE(!found && debug_locks); 6459 if (found) { 6460 pf = get_pending_free(); 6461 __lockdep_free_key_range(pf, key, 1); 6462 call_rcu_zapped(pf); 6463 } 6464 lockdep_unlock(); 6465 raw_local_irq_restore(flags); 6466 6467 /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ 6468 synchronize_rcu(); 6469 } 6470 EXPORT_SYMBOL_GPL(lockdep_unregister_key); 6471 6472 void __init lockdep_init(void) 6473 { 6474 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 6475 6476 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); 6477 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); 6478 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); 6479 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); 6480 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); 6481 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); 6482 printk("... 
CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); 6483 6484 printk(" memory used by lock dependency info: %zu kB\n", 6485 (sizeof(lock_classes) + 6486 sizeof(lock_classes_in_use) + 6487 sizeof(classhash_table) + 6488 sizeof(list_entries) + 6489 sizeof(list_entries_in_use) + 6490 sizeof(chainhash_table) + 6491 sizeof(delayed_free) 6492 #ifdef CONFIG_PROVE_LOCKING 6493 + sizeof(lock_cq) 6494 + sizeof(lock_chains) 6495 + sizeof(lock_chains_in_use) 6496 + sizeof(chain_hlocks) 6497 #endif 6498 ) / 1024 6499 ); 6500 6501 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 6502 printk(" memory used for stack traces: %zu kB\n", 6503 (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024 6504 ); 6505 #endif 6506 6507 printk(" per task-struct memory footprint: %zu bytes\n", 6508 sizeof(((struct task_struct *)NULL)->held_locks)); 6509 } 6510 6511 static void 6512 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 6513 const void *mem_to, struct held_lock *hlock) 6514 { 6515 if (!debug_locks_off()) 6516 return; 6517 if (debug_locks_silent) 6518 return; 6519 6520 pr_warn("\n"); 6521 pr_warn("=========================\n"); 6522 pr_warn("WARNING: held lock freed!\n"); 6523 print_kernel_ident(); 6524 pr_warn("-------------------------\n"); 6525 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n", 6526 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 6527 print_lock(hlock); 6528 lockdep_print_held_locks(curr); 6529 6530 pr_warn("\nstack backtrace:\n"); 6531 dump_stack(); 6532 } 6533 6534 static inline int not_in_range(const void* mem_from, unsigned long mem_len, 6535 const void* lock_from, unsigned long lock_len) 6536 { 6537 return lock_from + lock_len <= mem_from || 6538 mem_from + mem_len <= lock_from; 6539 } 6540 6541 /* 6542 * Called when kernel memory is freed (or unmapped), or if a lock 6543 * is destroyed or reinitialized - this code checks whether there is 6544 * any held lock in the memory range of <from> to <to>: 6545 */ 6546 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 6547 { 6548 struct task_struct *curr = current; 6549 struct held_lock *hlock; 6550 unsigned long flags; 6551 int i; 6552 6553 if (unlikely(!debug_locks)) 6554 return; 6555 6556 raw_local_irq_save(flags); 6557 for (i = 0; i < curr->lockdep_depth; i++) { 6558 hlock = curr->held_locks + i; 6559 6560 if (not_in_range(mem_from, mem_len, hlock->instance, 6561 sizeof(*hlock->instance))) 6562 continue; 6563 6564 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 6565 break; 6566 } 6567 raw_local_irq_restore(flags); 6568 } 6569 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 6570 6571 static void print_held_locks_bug(void) 6572 { 6573 if (!debug_locks_off()) 6574 return; 6575 if (debug_locks_silent) 6576 return; 6577 6578 pr_warn("\n"); 6579 pr_warn("====================================\n"); 6580 pr_warn("WARNING: %s/%d still has locks held!\n", 6581 current->comm, task_pid_nr(current)); 6582 print_kernel_ident(); 6583 pr_warn("------------------------------------\n"); 6584 lockdep_print_held_locks(current); 6585 pr_warn("\nstack backtrace:\n"); 6586 dump_stack(); 6587 } 6588 6589 void debug_check_no_locks_held(void) 6590 { 6591 if (unlikely(current->lockdep_depth > 0)) 6592 print_held_locks_bug(); 6593 } 6594 EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 6595 6596 #ifdef __KERNEL__ 6597 void debug_show_all_locks(void) 6598 { 6599 struct task_struct *g, *p; 6600 6601 if (unlikely(!debug_locks)) { 6602 pr_warn("INFO: lockdep is turned off.\n"); 
6603 return; 6604 } 6605 pr_warn("\nShowing all locks held in the system:\n"); 6606 6607 rcu_read_lock(); 6608 for_each_process_thread(g, p) { 6609 if (!p->lockdep_depth) 6610 continue; 6611 lockdep_print_held_locks(p); 6612 touch_nmi_watchdog(); 6613 touch_all_softlockup_watchdogs(); 6614 } 6615 rcu_read_unlock(); 6616 6617 pr_warn("\n"); 6618 pr_warn("=============================================\n\n"); 6619 } 6620 EXPORT_SYMBOL_GPL(debug_show_all_locks); 6621 #endif 6622 6623 /* 6624 * Careful: only use this function if you are sure that 6625 * the task cannot run in parallel! 6626 */ 6627 void debug_show_held_locks(struct task_struct *task) 6628 { 6629 if (unlikely(!debug_locks)) { 6630 printk("INFO: lockdep is turned off.\n"); 6631 return; 6632 } 6633 lockdep_print_held_locks(task); 6634 } 6635 EXPORT_SYMBOL_GPL(debug_show_held_locks); 6636 6637 asmlinkage __visible void lockdep_sys_exit(void) 6638 { 6639 struct task_struct *curr = current; 6640 6641 if (unlikely(curr->lockdep_depth)) { 6642 if (!debug_locks_off()) 6643 return; 6644 pr_warn("\n"); 6645 pr_warn("================================================\n"); 6646 pr_warn("WARNING: lock held when returning to user space!\n"); 6647 print_kernel_ident(); 6648 pr_warn("------------------------------------------------\n"); 6649 pr_warn("%s/%d is leaving the kernel with locks still held!\n", 6650 curr->comm, curr->pid); 6651 lockdep_print_held_locks(curr); 6652 } 6653 6654 /* 6655 * The lock history for each syscall should be independent. So wipe the 6656 * slate clean on return to userspace. 6657 */ 6658 lockdep_invariant_state(false); 6659 } 6660 6661 void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 6662 { 6663 struct task_struct *curr = current; 6664 int dl = READ_ONCE(debug_locks); 6665 bool rcu = warn_rcu_enter(); 6666 6667 /* Note: the following can be executed concurrently, so be careful. */ 6668 pr_warn("\n"); 6669 pr_warn("=============================\n"); 6670 pr_warn("WARNING: suspicious RCU usage\n"); 6671 print_kernel_ident(); 6672 pr_warn("-----------------------------\n"); 6673 pr_warn("%s:%d %s!\n", file, line, s); 6674 pr_warn("\nother info that might help us debug this:\n\n"); 6675 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n%s", 6676 !rcu_lockdep_current_cpu_online() 6677 ? "RCU used illegally from offline CPU!\n" 6678 : "", 6679 rcu_scheduler_active, dl, 6680 dl ? "" : "Possible false positive due to lockdep disabling via debug_locks = 0\n"); 6681 6682 /* 6683 * If a CPU is in the RCU-free window in idle (i.e. in the section 6684 * between ct_idle_enter() and ct_idle_exit()), then RCU 6685 * considers that CPU to be in an "extended quiescent state", 6686 * which means that RCU will be completely ignoring that CPU. 6687 * Therefore, rcu_read_lock() and friends have absolutely no 6688 * effect on a CPU running in that state. In other words, even if 6689 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 6690 * delete data structures out from under it. RCU really has no 6691 * choice here: we need to keep an RCU-free window in idle where 6692 * the CPU may possibly enter a low-power mode. This way, CPUs that 6693 * have started a grace period can notice the extended quiescent 6694 * state; otherwise we would delay every grace period for as long 6695 * as we run in the idle task. 6696 * 6697 * So complain bitterly if someone does call rcu_read_lock(), 6698 * rcu_read_lock_bh() and so on from extended quiescent states.
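 *
 * A sketch of the kind of pattern being complained about ('gp' being a
 * hypothetical RCU-protected pointer, and ct_idle_enter()/ct_idle_exit()
 * normally being called from the idle loop only):
 *
 *	ct_idle_enter();		// RCU stops watching this CPU
 *	rcu_read_lock();		// provides no protection here
 *	p = rcu_dereference(gp);	// 'p' may be freed under us
 *	rcu_read_unlock();
 *	ct_idle_exit();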
6699 */ 6700 if (!rcu_is_watching()) 6701 pr_warn("RCU used illegally from extended quiescent state!\n"); 6702 6703 lockdep_print_held_locks(curr); 6704 pr_warn("\nstack backtrace:\n"); 6705 dump_stack(); 6706 warn_rcu_exit(rcu); 6707 } 6708 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 6709
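/*
 * Note: lockdep_rcu_suspicious() is normally reached via the
 * RCU_LOCKDEP_WARN() helper, as used by the rcu_dereference_check()
 * family, e.g. (a sketch):
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *			 "suspicious rcu_dereference_check() usage");
 */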