Merge tag 'kbuild-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
[sfrench/cifs-2.6.git] / kernel / locking / lockdep.c
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if anytime in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitmap.h>
49 #include <linux/bitops.h>
50 #include <linux/gfp.h>
51 #include <linux/random.h>
52 #include <linux/jhash.h>
53 #include <linux/nmi.h>
54 #include <linux/rcupdate.h>
55 #include <linux/kprobes.h>
56
57 #include <asm/sections.h>
58
59 #include "lockdep_internals.h"
60
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/lock.h>
63
64 #ifdef CONFIG_PROVE_LOCKING
65 int prove_locking = 1;
66 module_param(prove_locking, int, 0644);
67 #else
68 #define prove_locking 0
69 #endif
70
71 #ifdef CONFIG_LOCK_STAT
72 int lock_stat = 1;
73 module_param(lock_stat, int, 0644);
74 #else
75 #define lock_stat 0
76 #endif
77
78 /*
79  * lockdep_lock: protects the lockdep graph, the hashes and the
80  *               class/list/hash allocators.
81  *
82  * This is one of the rare exceptions where it's justified
83  * to use a raw spinlock - we really dont want the spinlock
84  * code to recurse back into the lockdep code...
85  */
86 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
87 static struct task_struct *lockdep_selftest_task_struct;
88
89 static int graph_lock(void)
90 {
91         arch_spin_lock(&lockdep_lock);
92         /*
93          * Make sure that if another CPU detected a bug while
94          * walking the graph we dont change it (while the other
95          * CPU is busy printing out stuff with the graph lock
96          * dropped already)
97          */
98         if (!debug_locks) {
99                 arch_spin_unlock(&lockdep_lock);
100                 return 0;
101         }
102         /* prevent any recursions within lockdep from causing deadlocks */
103         current->lockdep_recursion++;
104         return 1;
105 }
106
107 static inline int graph_unlock(void)
108 {
109         if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
110                 /*
111                  * The lockdep graph lock isn't locked while we expect it to
112                  * be, we're confused now, bye!
113                  */
114                 return DEBUG_LOCKS_WARN_ON(1);
115         }
116
117         current->lockdep_recursion--;
118         arch_spin_unlock(&lockdep_lock);
119         return 0;
120 }
121
122 /*
123  * Turn lock debugging off and return with 0 if it was off already,
124  * and also release the graph lock:
125  */
126 static inline int debug_locks_off_graph_unlock(void)
127 {
128         int ret = debug_locks_off();
129
130         arch_spin_unlock(&lockdep_lock);
131
132         return ret;
133 }
134
135 unsigned long nr_list_entries;
136 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
137 static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
138
139 /*
140  * All data structures here are protected by the global debug_lock.
141  *
142  * nr_lock_classes is the number of elements of lock_classes[] that is
143  * in use.
144  */
145 #define KEYHASH_BITS            (MAX_LOCKDEP_KEYS_BITS - 1)
146 #define KEYHASH_SIZE            (1UL << KEYHASH_BITS)
147 static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
148 unsigned long nr_lock_classes;
149 #ifndef CONFIG_DEBUG_LOCKDEP
150 static
151 #endif
152 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
153
154 static inline struct lock_class *hlock_class(struct held_lock *hlock)
155 {
156         if (!hlock->class_idx) {
157                 /*
158                  * Someone passed in garbage, we give up.
159                  */
160                 DEBUG_LOCKS_WARN_ON(1);
161                 return NULL;
162         }
163         return lock_classes + hlock->class_idx - 1;
164 }
165
166 #ifdef CONFIG_LOCK_STAT
167 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
168
169 static inline u64 lockstat_clock(void)
170 {
171         return local_clock();
172 }
173
174 static int lock_point(unsigned long points[], unsigned long ip)
175 {
176         int i;
177
178         for (i = 0; i < LOCKSTAT_POINTS; i++) {
179                 if (points[i] == 0) {
180                         points[i] = ip;
181                         break;
182                 }
183                 if (points[i] == ip)
184                         break;
185         }
186
187         return i;
188 }
189
190 static void lock_time_inc(struct lock_time *lt, u64 time)
191 {
192         if (time > lt->max)
193                 lt->max = time;
194
195         if (time < lt->min || !lt->nr)
196                 lt->min = time;
197
198         lt->total += time;
199         lt->nr++;
200 }
201
202 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
203 {
204         if (!src->nr)
205                 return;
206
207         if (src->max > dst->max)
208                 dst->max = src->max;
209
210         if (src->min < dst->min || !dst->nr)
211                 dst->min = src->min;
212
213         dst->total += src->total;
214         dst->nr += src->nr;
215 }
216
217 struct lock_class_stats lock_stats(struct lock_class *class)
218 {
219         struct lock_class_stats stats;
220         int cpu, i;
221
222         memset(&stats, 0, sizeof(struct lock_class_stats));
223         for_each_possible_cpu(cpu) {
224                 struct lock_class_stats *pcs =
225                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
226
227                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
228                         stats.contention_point[i] += pcs->contention_point[i];
229
230                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
231                         stats.contending_point[i] += pcs->contending_point[i];
232
233                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
234                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
235
236                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
237                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
238
239                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
240                         stats.bounces[i] += pcs->bounces[i];
241         }
242
243         return stats;
244 }
245
246 void clear_lock_stats(struct lock_class *class)
247 {
248         int cpu;
249
250         for_each_possible_cpu(cpu) {
251                 struct lock_class_stats *cpu_stats =
252                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
253
254                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
255         }
256         memset(class->contention_point, 0, sizeof(class->contention_point));
257         memset(class->contending_point, 0, sizeof(class->contending_point));
258 }
259
260 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
261 {
262         return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
263 }
264
265 static void lock_release_holdtime(struct held_lock *hlock)
266 {
267         struct lock_class_stats *stats;
268         u64 holdtime;
269
270         if (!lock_stat)
271                 return;
272
273         holdtime = lockstat_clock() - hlock->holdtime_stamp;
274
275         stats = get_lock_stats(hlock_class(hlock));
276         if (hlock->read)
277                 lock_time_inc(&stats->read_holdtime, holdtime);
278         else
279                 lock_time_inc(&stats->write_holdtime, holdtime);
280 }
281 #else
282 static inline void lock_release_holdtime(struct held_lock *hlock)
283 {
284 }
285 #endif
286
287 /*
288  * We keep a global list of all lock classes. The list is only accessed with
289  * the lockdep spinlock lock held. free_lock_classes is a list with free
290  * elements. These elements are linked together by the lock_entry member in
291  * struct lock_class.
292  */
293 LIST_HEAD(all_lock_classes);
294 static LIST_HEAD(free_lock_classes);
295
296 /**
297  * struct pending_free - information about data structures about to be freed
298  * @zapped: Head of a list with struct lock_class elements.
299  * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
300  *      are about to be freed.
301  */
302 struct pending_free {
303         struct list_head zapped;
304         DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
305 };
306
307 /**
308  * struct delayed_free - data structures used for delayed freeing
309  *
310  * A data structure for delayed freeing of data structures that may be
311  * accessed by RCU readers at the time these were freed.
312  *
313  * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
314  * @index:     Index of @pf to which freed data structures are added.
315  * @scheduled: Whether or not an RCU callback has been scheduled.
316  * @pf:        Array with information about data structures about to be freed.
317  */
318 static struct delayed_free {
319         struct rcu_head         rcu_head;
320         int                     index;
321         int                     scheduled;
322         struct pending_free     pf[2];
323 } delayed_free;
324
325 /*
326  * The lockdep classes are in a hash-table as well, for fast lookup:
327  */
328 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
329 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
330 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
331 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
332
333 static struct hlist_head classhash_table[CLASSHASH_SIZE];
334
335 /*
336  * We put the lock dependency chains into a hash-table as well, to cache
337  * their existence:
338  */
339 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
340 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
341 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
342 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
343
344 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
345
346 /*
347  * The hash key of the lock dependency chains is a hash itself too:
348  * it's a hash of all locks taken up to that lock, including that lock.
349  * It's a 64-bit hash, because it's important for the keys to be
350  * unique.
351  */
352 static inline u64 iterate_chain_key(u64 key, u32 idx)
353 {
354         u32 k0 = key, k1 = key >> 32;
355
356         __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
357
358         return k0 | (u64)k1 << 32;
359 }
360
361 void lockdep_off(void)
362 {
363         current->lockdep_recursion++;
364 }
365 EXPORT_SYMBOL(lockdep_off);
366
367 void lockdep_on(void)
368 {
369         current->lockdep_recursion--;
370 }
371 EXPORT_SYMBOL(lockdep_on);
372
373 void lockdep_set_selftest_task(struct task_struct *task)
374 {
375         lockdep_selftest_task_struct = task;
376 }
377
378 /*
379  * Debugging switches:
380  */
381
382 #define VERBOSE                 0
383 #define VERY_VERBOSE            0
384
385 #if VERBOSE
386 # define HARDIRQ_VERBOSE        1
387 # define SOFTIRQ_VERBOSE        1
388 #else
389 # define HARDIRQ_VERBOSE        0
390 # define SOFTIRQ_VERBOSE        0
391 #endif
392
393 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
394 /*
395  * Quick filtering for interesting events:
396  */
397 static int class_filter(struct lock_class *class)
398 {
399 #if 0
400         /* Example */
401         if (class->name_version == 1 &&
402                         !strcmp(class->name, "lockname"))
403                 return 1;
404         if (class->name_version == 1 &&
405                         !strcmp(class->name, "&struct->lockfield"))
406                 return 1;
407 #endif
408         /* Filter everything else. 1 would be to allow everything else */
409         return 0;
410 }
411 #endif
412
413 static int verbose(struct lock_class *class)
414 {
415 #if VERBOSE
416         return class_filter(class);
417 #endif
418         return 0;
419 }
420
421 /*
422  * Stack-trace: tightly packed array of stack backtrace
423  * addresses. Protected by the graph_lock.
424  */
425 unsigned long nr_stack_trace_entries;
426 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
427
428 static void print_lockdep_off(const char *bug_msg)
429 {
430         printk(KERN_DEBUG "%s\n", bug_msg);
431         printk(KERN_DEBUG "turning off the locking correctness validator.\n");
432 #ifdef CONFIG_LOCK_STAT
433         printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
434 #endif
435 }
436
437 static int save_trace(struct lock_trace *trace)
438 {
439         unsigned long *entries = stack_trace + nr_stack_trace_entries;
440         unsigned int max_entries;
441
442         trace->offset = nr_stack_trace_entries;
443         max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
444         trace->nr_entries = stack_trace_save(entries, max_entries, 3);
445         nr_stack_trace_entries += trace->nr_entries;
446
447         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
448                 if (!debug_locks_off_graph_unlock())
449                         return 0;
450
451                 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
452                 dump_stack();
453
454                 return 0;
455         }
456
457         return 1;
458 }
459
460 unsigned int nr_hardirq_chains;
461 unsigned int nr_softirq_chains;
462 unsigned int nr_process_chains;
463 unsigned int max_lockdep_depth;
464
465 #ifdef CONFIG_DEBUG_LOCKDEP
466 /*
467  * Various lockdep statistics:
468  */
469 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
470 #endif
471
472 /*
473  * Locking printouts:
474  */
475
476 #define __USAGE(__STATE)                                                \
477         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
478         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
479         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
480         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
481
482 static const char *usage_str[] =
483 {
484 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
485 #include "lockdep_states.h"
486 #undef LOCKDEP_STATE
487         [LOCK_USED] = "INITIAL USE",
488 };
489
490 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
491 {
492         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
493 }
494
495 static inline unsigned long lock_flag(enum lock_usage_bit bit)
496 {
497         return 1UL << bit;
498 }
499
500 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
501 {
502         char c = '.';
503
504         if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
505                 c = '+';
506         if (class->usage_mask & lock_flag(bit)) {
507                 c = '-';
508                 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
509                         c = '?';
510         }
511
512         return c;
513 }
514
515 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
516 {
517         int i = 0;
518
519 #define LOCKDEP_STATE(__STATE)                                          \
520         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
521         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
522 #include "lockdep_states.h"
523 #undef LOCKDEP_STATE
524
525         usage[i] = '\0';
526 }
527
528 static void __print_lock_name(struct lock_class *class)
529 {
530         char str[KSYM_NAME_LEN];
531         const char *name;
532
533         name = class->name;
534         if (!name) {
535                 name = __get_key_name(class->key, str);
536                 printk(KERN_CONT "%s", name);
537         } else {
538                 printk(KERN_CONT "%s", name);
539                 if (class->name_version > 1)
540                         printk(KERN_CONT "#%d", class->name_version);
541                 if (class->subclass)
542                         printk(KERN_CONT "/%d", class->subclass);
543         }
544 }
545
546 static void print_lock_name(struct lock_class *class)
547 {
548         char usage[LOCK_USAGE_CHARS];
549
550         get_usage_chars(class, usage);
551
552         printk(KERN_CONT " (");
553         __print_lock_name(class);
554         printk(KERN_CONT "){%s}", usage);
555 }
556
557 static void print_lockdep_cache(struct lockdep_map *lock)
558 {
559         const char *name;
560         char str[KSYM_NAME_LEN];
561
562         name = lock->name;
563         if (!name)
564                 name = __get_key_name(lock->key->subkeys, str);
565
566         printk(KERN_CONT "%s", name);
567 }
568
569 static void print_lock(struct held_lock *hlock)
570 {
571         /*
572          * We can be called locklessly through debug_show_all_locks() so be
573          * extra careful, the hlock might have been released and cleared.
574          */
575         unsigned int class_idx = hlock->class_idx;
576
577         /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
578         barrier();
579
580         if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
581                 printk(KERN_CONT "<RELEASED>\n");
582                 return;
583         }
584
585         printk(KERN_CONT "%p", hlock->instance);
586         print_lock_name(lock_classes + class_idx - 1);
587         printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
588 }
589
590 static void lockdep_print_held_locks(struct task_struct *p)
591 {
592         int i, depth = READ_ONCE(p->lockdep_depth);
593
594         if (!depth)
595                 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
596         else
597                 printk("%d lock%s held by %s/%d:\n", depth,
598                        depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
599         /*
600          * It's not reliable to print a task's held locks if it's not sleeping
601          * and it's not the current task.
602          */
603         if (p->state == TASK_RUNNING && p != current)
604                 return;
605         for (i = 0; i < depth; i++) {
606                 printk(" #%d: ", i);
607                 print_lock(p->held_locks + i);
608         }
609 }
610
611 static void print_kernel_ident(void)
612 {
613         printk("%s %.*s %s\n", init_utsname()->release,
614                 (int)strcspn(init_utsname()->version, " "),
615                 init_utsname()->version,
616                 print_tainted());
617 }
618
619 static int very_verbose(struct lock_class *class)
620 {
621 #if VERY_VERBOSE
622         return class_filter(class);
623 #endif
624         return 0;
625 }
626
627 /*
628  * Is this the address of a static object:
629  */
630 #ifdef __KERNEL__
631 static int static_obj(const void *obj)
632 {
633         unsigned long start = (unsigned long) &_stext,
634                       end   = (unsigned long) &_end,
635                       addr  = (unsigned long) obj;
636
637         if (arch_is_kernel_initmem_freed(addr))
638                 return 0;
639
640         /*
641          * static variable?
642          */
643         if ((addr >= start) && (addr < end))
644                 return 1;
645
646         if (arch_is_kernel_data(addr))
647                 return 1;
648
649         /*
650          * in-kernel percpu var?
651          */
652         if (is_kernel_percpu_address(addr))
653                 return 1;
654
655         /*
656          * module static or percpu var?
657          */
658         return is_module_address(addr) || is_module_percpu_address(addr);
659 }
660 #endif
661
662 /*
663  * To make lock name printouts unique, we calculate a unique
664  * class->name_version generation counter. The caller must hold the graph
665  * lock.
666  */
667 static int count_matching_names(struct lock_class *new_class)
668 {
669         struct lock_class *class;
670         int count = 0;
671
672         if (!new_class->name)
673                 return 0;
674
675         list_for_each_entry(class, &all_lock_classes, lock_entry) {
676                 if (new_class->key - new_class->subclass == class->key)
677                         return class->name_version;
678                 if (class->name && !strcmp(class->name, new_class->name))
679                         count = max(count, class->name_version);
680         }
681
682         return count + 1;
683 }
684
685 static inline struct lock_class *
686 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
687 {
688         struct lockdep_subclass_key *key;
689         struct hlist_head *hash_head;
690         struct lock_class *class;
691
692         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
693                 debug_locks_off();
694                 printk(KERN_ERR
695                         "BUG: looking up invalid subclass: %u\n", subclass);
696                 printk(KERN_ERR
697                         "turning off the locking correctness validator.\n");
698                 dump_stack();
699                 return NULL;
700         }
701
702         /*
703          * If it is not initialised then it has never been locked,
704          * so it won't be present in the hash table.
705          */
706         if (unlikely(!lock->key))
707                 return NULL;
708
709         /*
710          * NOTE: the class-key must be unique. For dynamic locks, a static
711          * lock_class_key variable is passed in through the mutex_init()
712          * (or spin_lock_init()) call - which acts as the key. For static
713          * locks we use the lock object itself as the key.
714          */
715         BUILD_BUG_ON(sizeof(struct lock_class_key) >
716                         sizeof(struct lockdep_map));
717
718         key = lock->key->subkeys + subclass;
719
720         hash_head = classhashentry(key);
721
722         /*
723          * We do an RCU walk of the hash, see lockdep_free_key_range().
724          */
725         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
726                 return NULL;
727
728         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
729                 if (class->key == key) {
730                         /*
731                          * Huh! same key, different name? Did someone trample
732                          * on some memory? We're most confused.
733                          */
734                         WARN_ON_ONCE(class->name != lock->name);
735                         return class;
736                 }
737         }
738
739         return NULL;
740 }
741
742 /*
743  * Static locks do not have their class-keys yet - for them the key is
744  * the lock object itself. If the lock is in the per cpu area, the
745  * canonical address of the lock (per cpu offset removed) is used.
746  */
747 static bool assign_lock_key(struct lockdep_map *lock)
748 {
749         unsigned long can_addr, addr = (unsigned long)lock;
750
751 #ifdef __KERNEL__
752         /*
753          * lockdep_free_key_range() assumes that struct lock_class_key
754          * objects do not overlap. Since we use the address of lock
755          * objects as class key for static objects, check whether the
756          * size of lock_class_key objects does not exceed the size of
757          * the smallest lock object.
758          */
759         BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
760 #endif
761
762         if (__is_kernel_percpu_address(addr, &can_addr))
763                 lock->key = (void *)can_addr;
764         else if (__is_module_percpu_address(addr, &can_addr))
765                 lock->key = (void *)can_addr;
766         else if (static_obj(lock))
767                 lock->key = (void *)lock;
768         else {
769                 /* Debug-check: all keys must be persistent! */
770                 debug_locks_off();
771                 pr_err("INFO: trying to register non-static key.\n");
772                 pr_err("the code is fine but needs lockdep annotation.\n");
773                 pr_err("turning off the locking correctness validator.\n");
774                 dump_stack();
775                 return false;
776         }
777
778         return true;
779 }
780
781 #ifdef CONFIG_DEBUG_LOCKDEP
782
783 /* Check whether element @e occurs in list @h */
784 static bool in_list(struct list_head *e, struct list_head *h)
785 {
786         struct list_head *f;
787
788         list_for_each(f, h) {
789                 if (e == f)
790                         return true;
791         }
792
793         return false;
794 }
795
796 /*
797  * Check whether entry @e occurs in any of the locks_after or locks_before
798  * lists.
799  */
800 static bool in_any_class_list(struct list_head *e)
801 {
802         struct lock_class *class;
803         int i;
804
805         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
806                 class = &lock_classes[i];
807                 if (in_list(e, &class->locks_after) ||
808                     in_list(e, &class->locks_before))
809                         return true;
810         }
811         return false;
812 }
813
814 static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
815 {
816         struct lock_list *e;
817
818         list_for_each_entry(e, h, entry) {
819                 if (e->links_to != c) {
820                         printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
821                                c->name ? : "(?)",
822                                (unsigned long)(e - list_entries),
823                                e->links_to && e->links_to->name ?
824                                e->links_to->name : "(?)",
825                                e->class && e->class->name ? e->class->name :
826                                "(?)");
827                         return false;
828                 }
829         }
830         return true;
831 }
832
833 #ifdef CONFIG_PROVE_LOCKING
834 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
835 #endif
836
837 static bool check_lock_chain_key(struct lock_chain *chain)
838 {
839 #ifdef CONFIG_PROVE_LOCKING
840         u64 chain_key = 0;
841         int i;
842
843         for (i = chain->base; i < chain->base + chain->depth; i++)
844                 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
845         /*
846          * The 'unsigned long long' casts avoid that a compiler warning
847          * is reported when building tools/lib/lockdep.
848          */
849         if (chain->chain_key != chain_key) {
850                 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
851                        (unsigned long long)(chain - lock_chains),
852                        (unsigned long long)chain->chain_key,
853                        (unsigned long long)chain_key);
854                 return false;
855         }
856 #endif
857         return true;
858 }
859
860 static bool in_any_zapped_class_list(struct lock_class *class)
861 {
862         struct pending_free *pf;
863         int i;
864
865         for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
866                 if (in_list(&class->lock_entry, &pf->zapped))
867                         return true;
868         }
869
870         return false;
871 }
872
873 static bool __check_data_structures(void)
874 {
875         struct lock_class *class;
876         struct lock_chain *chain;
877         struct hlist_head *head;
878         struct lock_list *e;
879         int i;
880
881         /* Check whether all classes occur in a lock list. */
882         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
883                 class = &lock_classes[i];
884                 if (!in_list(&class->lock_entry, &all_lock_classes) &&
885                     !in_list(&class->lock_entry, &free_lock_classes) &&
886                     !in_any_zapped_class_list(class)) {
887                         printk(KERN_INFO "class %px/%s is not in any class list\n",
888                                class, class->name ? : "(?)");
889                         return false;
890                 }
891         }
892
893         /* Check whether all classes have valid lock lists. */
894         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
895                 class = &lock_classes[i];
896                 if (!class_lock_list_valid(class, &class->locks_before))
897                         return false;
898                 if (!class_lock_list_valid(class, &class->locks_after))
899                         return false;
900         }
901
902         /* Check the chain_key of all lock chains. */
903         for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
904                 head = chainhash_table + i;
905                 hlist_for_each_entry_rcu(chain, head, entry) {
906                         if (!check_lock_chain_key(chain))
907                                 return false;
908                 }
909         }
910
911         /*
912          * Check whether all list entries that are in use occur in a class
913          * lock list.
914          */
915         for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
916                 e = list_entries + i;
917                 if (!in_any_class_list(&e->entry)) {
918                         printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
919                                (unsigned int)(e - list_entries),
920                                e->class->name ? : "(?)",
921                                e->links_to->name ? : "(?)");
922                         return false;
923                 }
924         }
925
926         /*
927          * Check whether all list entries that are not in use do not occur in
928          * a class lock list.
929          */
930         for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
931                 e = list_entries + i;
932                 if (in_any_class_list(&e->entry)) {
933                         printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
934                                (unsigned int)(e - list_entries),
935                                e->class && e->class->name ? e->class->name :
936                                "(?)",
937                                e->links_to && e->links_to->name ?
938                                e->links_to->name : "(?)");
939                         return false;
940                 }
941         }
942
943         return true;
944 }
945
946 int check_consistency = 0;
947 module_param(check_consistency, int, 0644);
948
949 static void check_data_structures(void)
950 {
951         static bool once = false;
952
953         if (check_consistency && !once) {
954                 if (!__check_data_structures()) {
955                         once = true;
956                         WARN_ON(once);
957                 }
958         }
959 }
960
961 #else /* CONFIG_DEBUG_LOCKDEP */
962
963 static inline void check_data_structures(void) { }
964
965 #endif /* CONFIG_DEBUG_LOCKDEP */
966
967 /*
968  * Initialize the lock_classes[] array elements, the free_lock_classes list
969  * and also the delayed_free structure.
970  */
971 static void init_data_structures_once(void)
972 {
973         static bool ds_initialized, rcu_head_initialized;
974         int i;
975
976         if (likely(rcu_head_initialized))
977                 return;
978
979         if (system_state >= SYSTEM_SCHEDULING) {
980                 init_rcu_head(&delayed_free.rcu_head);
981                 rcu_head_initialized = true;
982         }
983
984         if (ds_initialized)
985                 return;
986
987         ds_initialized = true;
988
989         INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
990         INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
991
992         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
993                 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
994                 INIT_LIST_HEAD(&lock_classes[i].locks_after);
995                 INIT_LIST_HEAD(&lock_classes[i].locks_before);
996         }
997 }
998
999 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1000 {
1001         unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1002
1003         return lock_keys_hash + hash;
1004 }
1005
1006 /* Register a dynamically allocated key. */
1007 void lockdep_register_key(struct lock_class_key *key)
1008 {
1009         struct hlist_head *hash_head;
1010         struct lock_class_key *k;
1011         unsigned long flags;
1012
1013         if (WARN_ON_ONCE(static_obj(key)))
1014                 return;
1015         hash_head = keyhashentry(key);
1016
1017         raw_local_irq_save(flags);
1018         if (!graph_lock())
1019                 goto restore_irqs;
1020         hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1021                 if (WARN_ON_ONCE(k == key))
1022                         goto out_unlock;
1023         }
1024         hlist_add_head_rcu(&key->hash_entry, hash_head);
1025 out_unlock:
1026         graph_unlock();
1027 restore_irqs:
1028         raw_local_irq_restore(flags);
1029 }
1030 EXPORT_SYMBOL_GPL(lockdep_register_key);
1031
1032 /* Check whether a key has been registered as a dynamic key. */
1033 static bool is_dynamic_key(const struct lock_class_key *key)
1034 {
1035         struct hlist_head *hash_head;
1036         struct lock_class_key *k;
1037         bool found = false;
1038
1039         if (WARN_ON_ONCE(static_obj(key)))
1040                 return false;
1041
1042         /*
1043          * If lock debugging is disabled lock_keys_hash[] may contain
1044          * pointers to memory that has already been freed. Avoid triggering
1045          * a use-after-free in that case by returning early.
1046          */
1047         if (!debug_locks)
1048                 return true;
1049
1050         hash_head = keyhashentry(key);
1051
1052         rcu_read_lock();
1053         hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1054                 if (k == key) {
1055                         found = true;
1056                         break;
1057                 }
1058         }
1059         rcu_read_unlock();
1060
1061         return found;
1062 }
1063
1064 /*
1065  * Register a lock's class in the hash-table, if the class is not present
1066  * yet. Otherwise we look it up. We cache the result in the lock object
1067  * itself, so actual lookup of the hash should be once per lock object.
1068  */
1069 static struct lock_class *
1070 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1071 {
1072         struct lockdep_subclass_key *key;
1073         struct hlist_head *hash_head;
1074         struct lock_class *class;
1075
1076         DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1077
1078         class = look_up_lock_class(lock, subclass);
1079         if (likely(class))
1080                 goto out_set_class_cache;
1081
1082         if (!lock->key) {
1083                 if (!assign_lock_key(lock))
1084                         return NULL;
1085         } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1086                 return NULL;
1087         }
1088
1089         key = lock->key->subkeys + subclass;
1090         hash_head = classhashentry(key);
1091
1092         if (!graph_lock()) {
1093                 return NULL;
1094         }
1095         /*
1096          * We have to do the hash-walk again, to avoid races
1097          * with another CPU:
1098          */
1099         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1100                 if (class->key == key)
1101                         goto out_unlock_set;
1102         }
1103
1104         init_data_structures_once();
1105
1106         /* Allocate a new lock class and add it to the hash. */
1107         class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1108                                          lock_entry);
1109         if (!class) {
1110                 if (!debug_locks_off_graph_unlock()) {
1111                         return NULL;
1112                 }
1113
1114                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1115                 dump_stack();
1116                 return NULL;
1117         }
1118         nr_lock_classes++;
1119         debug_atomic_inc(nr_unused_locks);
1120         class->key = key;
1121         class->name = lock->name;
1122         class->subclass = subclass;
1123         WARN_ON_ONCE(!list_empty(&class->locks_before));
1124         WARN_ON_ONCE(!list_empty(&class->locks_after));
1125         class->name_version = count_matching_names(class);
1126         /*
1127          * We use RCU's safe list-add method to make
1128          * parallel walking of the hash-list safe:
1129          */
1130         hlist_add_head_rcu(&class->hash_entry, hash_head);
1131         /*
1132          * Remove the class from the free list and add it to the global list
1133          * of classes.
1134          */
1135         list_move_tail(&class->lock_entry, &all_lock_classes);
1136
1137         if (verbose(class)) {
1138                 graph_unlock();
1139
1140                 printk("\nnew class %px: %s", class->key, class->name);
1141                 if (class->name_version > 1)
1142                         printk(KERN_CONT "#%d", class->name_version);
1143                 printk(KERN_CONT "\n");
1144                 dump_stack();
1145
1146                 if (!graph_lock()) {
1147                         return NULL;
1148                 }
1149         }
1150 out_unlock_set:
1151         graph_unlock();
1152
1153 out_set_class_cache:
1154         if (!subclass || force)
1155                 lock->class_cache[0] = class;
1156         else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1157                 lock->class_cache[subclass] = class;
1158
1159         /*
1160          * Hash collision, did we smoke some? We found a class with a matching
1161          * hash but the subclass -- which is hashed in -- didn't match.
1162          */
1163         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1164                 return NULL;
1165
1166         return class;
1167 }
1168
1169 #ifdef CONFIG_PROVE_LOCKING
1170 /*
1171  * Allocate a lockdep entry. (assumes the graph_lock held, returns
1172  * with NULL on failure)
1173  */
1174 static struct lock_list *alloc_list_entry(void)
1175 {
1176         int idx = find_first_zero_bit(list_entries_in_use,
1177                                       ARRAY_SIZE(list_entries));
1178
1179         if (idx >= ARRAY_SIZE(list_entries)) {
1180                 if (!debug_locks_off_graph_unlock())
1181                         return NULL;
1182
1183                 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1184                 dump_stack();
1185                 return NULL;
1186         }
1187         nr_list_entries++;
1188         __set_bit(idx, list_entries_in_use);
1189         return list_entries + idx;
1190 }
1191
1192 /*
1193  * Add a new dependency to the head of the list:
1194  */
1195 static int add_lock_to_list(struct lock_class *this,
1196                             struct lock_class *links_to, struct list_head *head,
1197                             unsigned long ip, int distance,
1198                             struct lock_trace *trace)
1199 {
1200         struct lock_list *entry;
1201         /*
1202          * Lock not present yet - get a new dependency struct and
1203          * add it to the list:
1204          */
1205         entry = alloc_list_entry();
1206         if (!entry)
1207                 return 0;
1208
1209         entry->class = this;
1210         entry->links_to = links_to;
1211         entry->distance = distance;
1212         entry->trace = *trace;
1213         /*
1214          * Both allocation and removal are done under the graph lock; but
1215          * iteration is under RCU-sched; see look_up_lock_class() and
1216          * lockdep_free_key_range().
1217          */
1218         list_add_tail_rcu(&entry->entry, head);
1219
1220         return 1;
1221 }
1222
1223 /*
1224  * For good efficiency of modular, we use power of 2
1225  */
1226 #define MAX_CIRCULAR_QUEUE_SIZE         4096UL
1227 #define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)
1228
1229 /*
1230  * The circular_queue and helpers is used to implement the
1231  * breadth-first search(BFS)algorithem, by which we can build
1232  * the shortest path from the next lock to be acquired to the
1233  * previous held lock if there is a circular between them.
1234  */
1235 struct circular_queue {
1236         unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
1237         unsigned int  front, rear;
1238 };
1239
1240 static struct circular_queue lock_cq;
1241
1242 unsigned int max_bfs_queue_depth;
1243
1244 static unsigned int lockdep_dependency_gen_id;
1245
1246 static inline void __cq_init(struct circular_queue *cq)
1247 {
1248         cq->front = cq->rear = 0;
1249         lockdep_dependency_gen_id++;
1250 }
1251
1252 static inline int __cq_empty(struct circular_queue *cq)
1253 {
1254         return (cq->front == cq->rear);
1255 }
1256
1257 static inline int __cq_full(struct circular_queue *cq)
1258 {
1259         return ((cq->rear + 1) & CQ_MASK) == cq->front;
1260 }
1261
1262 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
1263 {
1264         if (__cq_full(cq))
1265                 return -1;
1266
1267         cq->element[cq->rear] = elem;
1268         cq->rear = (cq->rear + 1) & CQ_MASK;
1269         return 0;
1270 }
1271
1272 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
1273 {
1274         if (__cq_empty(cq))
1275                 return -1;
1276
1277         *elem = cq->element[cq->front];
1278         cq->front = (cq->front + 1) & CQ_MASK;
1279         return 0;
1280 }
1281
1282 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
1283 {
1284         return (cq->rear - cq->front) & CQ_MASK;
1285 }
1286
1287 static inline void mark_lock_accessed(struct lock_list *lock,
1288                                         struct lock_list *parent)
1289 {
1290         unsigned long nr;
1291
1292         nr = lock - list_entries;
1293         WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1294         lock->parent = parent;
1295         lock->class->dep_gen_id = lockdep_dependency_gen_id;
1296 }
1297
1298 static inline unsigned long lock_accessed(struct lock_list *lock)
1299 {
1300         unsigned long nr;
1301
1302         nr = lock - list_entries;
1303         WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1304         return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1305 }
1306
1307 static inline struct lock_list *get_lock_parent(struct lock_list *child)
1308 {
1309         return child->parent;
1310 }
1311
1312 static inline int get_lock_depth(struct lock_list *child)
1313 {
1314         int depth = 0;
1315         struct lock_list *parent;
1316
1317         while ((parent = get_lock_parent(child))) {
1318                 child = parent;
1319                 depth++;
1320         }
1321         return depth;
1322 }
1323
1324 static int __bfs(struct lock_list *source_entry,
1325                  void *data,
1326                  int (*match)(struct lock_list *entry, void *data),
1327                  struct lock_list **target_entry,
1328                  int forward)
1329 {
1330         struct lock_list *entry;
1331         struct list_head *head;
1332         struct circular_queue *cq = &lock_cq;
1333         int ret = 1;
1334
1335         if (match(source_entry, data)) {
1336                 *target_entry = source_entry;
1337                 ret = 0;
1338                 goto exit;
1339         }
1340
1341         if (forward)
1342                 head = &source_entry->class->locks_after;
1343         else
1344                 head = &source_entry->class->locks_before;
1345
1346         if (list_empty(head))
1347                 goto exit;
1348
1349         __cq_init(cq);
1350         __cq_enqueue(cq, (unsigned long)source_entry);
1351
1352         while (!__cq_empty(cq)) {
1353                 struct lock_list *lock;
1354
1355                 __cq_dequeue(cq, (unsigned long *)&lock);
1356
1357                 if (!lock->class) {
1358                         ret = -2;
1359                         goto exit;
1360                 }
1361
1362                 if (forward)
1363                         head = &lock->class->locks_after;
1364                 else
1365                         head = &lock->class->locks_before;
1366
1367                 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1368
1369                 list_for_each_entry_rcu(entry, head, entry) {
1370                         if (!lock_accessed(entry)) {
1371                                 unsigned int cq_depth;
1372                                 mark_lock_accessed(entry, lock);
1373                                 if (match(entry, data)) {
1374                                         *target_entry = entry;
1375                                         ret = 0;
1376                                         goto exit;
1377                                 }
1378
1379                                 if (__cq_enqueue(cq, (unsigned long)entry)) {
1380                                         ret = -1;
1381                                         goto exit;
1382                                 }
1383                                 cq_depth = __cq_get_elem_count(cq);
1384                                 if (max_bfs_queue_depth < cq_depth)
1385                                         max_bfs_queue_depth = cq_depth;
1386                         }
1387                 }
1388         }
1389 exit:
1390         return ret;
1391 }
1392
1393 static inline int __bfs_forwards(struct lock_list *src_entry,
1394                         void *data,
1395                         int (*match)(struct lock_list *entry, void *data),
1396                         struct lock_list **target_entry)
1397 {
1398         return __bfs(src_entry, data, match, target_entry, 1);
1399
1400 }
1401
1402 static inline int __bfs_backwards(struct lock_list *src_entry,
1403                         void *data,
1404                         int (*match)(struct lock_list *entry, void *data),
1405                         struct lock_list **target_entry)
1406 {
1407         return __bfs(src_entry, data, match, target_entry, 0);
1408
1409 }
1410
1411 /*
1412  * Recursive, forwards-direction lock-dependency checking, used for
1413  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1414  * checking.
1415  */
1416
1417 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
1418 {
1419         unsigned long *entries = stack_trace + trace->offset;
1420
1421         stack_trace_print(entries, trace->nr_entries, spaces);
1422 }
1423
1424 /*
1425  * Print a dependency chain entry (this is only done when a deadlock
1426  * has been detected):
1427  */
1428 static noinline int
1429 print_circular_bug_entry(struct lock_list *target, int depth)
1430 {
1431         if (debug_locks_silent)
1432                 return 0;
1433         printk("\n-> #%u", depth);
1434         print_lock_name(target->class);
1435         printk(KERN_CONT ":\n");
1436         print_lock_trace(&target->trace, 6);
1437         return 0;
1438 }
1439
1440 static void
1441 print_circular_lock_scenario(struct held_lock *src,
1442                              struct held_lock *tgt,
1443                              struct lock_list *prt)
1444 {
1445         struct lock_class *source = hlock_class(src);
1446         struct lock_class *target = hlock_class(tgt);
1447         struct lock_class *parent = prt->class;
1448
1449         /*
1450          * A direct locking problem where unsafe_class lock is taken
1451          * directly by safe_class lock, then all we need to show
1452          * is the deadlock scenario, as it is obvious that the
1453          * unsafe lock is taken under the safe lock.
1454          *
1455          * But if there is a chain instead, where the safe lock takes
1456          * an intermediate lock (middle_class) where this lock is
1457          * not the same as the safe lock, then the lock chain is
1458          * used to describe the problem. Otherwise we would need
1459          * to show a different CPU case for each link in the chain
1460          * from the safe_class lock to the unsafe_class lock.
1461          */
1462         if (parent != source) {
1463                 printk("Chain exists of:\n  ");
1464                 __print_lock_name(source);
1465                 printk(KERN_CONT " --> ");
1466                 __print_lock_name(parent);
1467                 printk(KERN_CONT " --> ");
1468                 __print_lock_name(target);
1469                 printk(KERN_CONT "\n\n");
1470         }
1471
1472         printk(" Possible unsafe locking scenario:\n\n");
1473         printk("       CPU0                    CPU1\n");
1474         printk("       ----                    ----\n");
1475         printk("  lock(");
1476         __print_lock_name(target);
1477         printk(KERN_CONT ");\n");
1478         printk("                               lock(");
1479         __print_lock_name(parent);
1480         printk(KERN_CONT ");\n");
1481         printk("                               lock(");
1482         __print_lock_name(target);
1483         printk(KERN_CONT ");\n");
1484         printk("  lock(");
1485         __print_lock_name(source);
1486         printk(KERN_CONT ");\n");
1487         printk("\n *** DEADLOCK ***\n\n");
1488 }
1489
1490 /*
1491  * When a circular dependency is detected, print the
1492  * header first:
1493  */
1494 static noinline int
1495 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1496                         struct held_lock *check_src,
1497                         struct held_lock *check_tgt)
1498 {
1499         struct task_struct *curr = current;
1500
1501         if (debug_locks_silent)
1502                 return 0;
1503
1504         pr_warn("\n");
1505         pr_warn("======================================================\n");
1506         pr_warn("WARNING: possible circular locking dependency detected\n");
1507         print_kernel_ident();
1508         pr_warn("------------------------------------------------------\n");
1509         pr_warn("%s/%d is trying to acquire lock:\n",
1510                 curr->comm, task_pid_nr(curr));
1511         print_lock(check_src);
1512
1513         pr_warn("\nbut task is already holding lock:\n");
1514
1515         print_lock(check_tgt);
1516         pr_warn("\nwhich lock already depends on the new lock.\n\n");
1517         pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1518
1519         print_circular_bug_entry(entry, depth);
1520
1521         return 0;
1522 }
1523
1524 static inline int class_equal(struct lock_list *entry, void *data)
1525 {
1526         return entry->class == data;
1527 }
1528
1529 static noinline int print_circular_bug(struct lock_list *this,
1530                                        struct lock_list *target,
1531                                        struct held_lock *check_src,
1532                                        struct held_lock *check_tgt)
1533 {
1534         struct task_struct *curr = current;
1535         struct lock_list *parent;
1536         struct lock_list *first_parent;
1537         int depth;
1538
1539         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1540                 return 0;
1541
1542         if (!save_trace(&this->trace))
1543                 return 0;
1544
1545         depth = get_lock_depth(target);
1546
1547         print_circular_bug_header(target, depth, check_src, check_tgt);
1548
1549         parent = get_lock_parent(target);
1550         first_parent = parent;
1551
1552         while (parent) {
1553                 print_circular_bug_entry(parent, --depth);
1554                 parent = get_lock_parent(parent);
1555         }
1556
1557         printk("\nother info that might help us debug this:\n\n");
1558         print_circular_lock_scenario(check_src, check_tgt,
1559                                      first_parent);
1560
1561         lockdep_print_held_locks(curr);
1562
1563         printk("\nstack backtrace:\n");
1564         dump_stack();
1565
1566         return 0;
1567 }
1568
1569 static noinline int print_bfs_bug(int ret)
1570 {
1571         if (!debug_locks_off_graph_unlock())
1572                 return 0;
1573
1574         /*
1575          * Breadth-first-search failed, graph got corrupted?
1576          */
1577         WARN(1, "lockdep bfs error:%d\n", ret);
1578
1579         return 0;
1580 }
1581
1582 static int noop_count(struct lock_list *entry, void *data)
1583 {
1584         (*(unsigned long *)data)++;
1585         return 0;
1586 }
1587
1588 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1589 {
1590         unsigned long  count = 0;
1591         struct lock_list *uninitialized_var(target_entry);
1592
1593         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1594
1595         return count;
1596 }
1597 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1598 {
1599         unsigned long ret, flags;
1600         struct lock_list this;
1601
1602         this.parent = NULL;
1603         this.class = class;
1604
1605         raw_local_irq_save(flags);
1606         arch_spin_lock(&lockdep_lock);
1607         ret = __lockdep_count_forward_deps(&this);
1608         arch_spin_unlock(&lockdep_lock);
1609         raw_local_irq_restore(flags);
1610
1611         return ret;
1612 }
1613
1614 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1615 {
1616         unsigned long  count = 0;
1617         struct lock_list *uninitialized_var(target_entry);
1618
1619         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1620
1621         return count;
1622 }
1623
1624 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1625 {
1626         unsigned long ret, flags;
1627         struct lock_list this;
1628
1629         this.parent = NULL;
1630         this.class = class;
1631
1632         raw_local_irq_save(flags);
1633         arch_spin_lock(&lockdep_lock);
1634         ret = __lockdep_count_backward_deps(&this);
1635         arch_spin_unlock(&lockdep_lock);
1636         raw_local_irq_restore(flags);
1637
1638         return ret;
1639 }
1640
1641 /*
1642  * Prove that the dependency graph starting at <entry> can not
1643  * lead to <target>. Print an error and return 0 if it does.
1644  */
1645 static noinline int
1646 check_noncircular(struct lock_list *root, struct lock_class *target,
1647                 struct lock_list **target_entry)
1648 {
1649         int result;
1650
1651         debug_atomic_inc(nr_cyclic_checks);
1652
1653         result = __bfs_forwards(root, target, class_equal, target_entry);
1654
1655         return result;
1656 }
1657
1658 static noinline int
1659 check_redundant(struct lock_list *root, struct lock_class *target,
1660                 struct lock_list **target_entry)
1661 {
1662         int result;
1663
1664         debug_atomic_inc(nr_redundant_checks);
1665
1666         result = __bfs_forwards(root, target, class_equal, target_entry);
1667
1668         return result;
1669 }
1670
1671 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1672
1673 static inline int usage_accumulate(struct lock_list *entry, void *mask)
1674 {
1675         *(unsigned long *)mask |= entry->class->usage_mask;
1676
1677         return 0;
1678 }
1679
1680 /*
1681  * Forwards and backwards subgraph searching, for the purposes of
1682  * proving that two subgraphs can be connected by a new dependency
1683  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1684  */
1685
1686 static inline int usage_match(struct lock_list *entry, void *mask)
1687 {
1688         return entry->class->usage_mask & *(unsigned long *)mask;
1689 }
1690
1691 /*
1692  * Find a node in the forwards-direction dependency sub-graph starting
1693  * at @root->class that matches @bit.
1694  *
1695  * Return 0 if such a node exists in the subgraph, and put that node
1696  * into *@target_entry.
1697  *
1698  * Return 1 otherwise and keep *@target_entry unchanged.
1699  * Return <0 on error.
1700  */
1701 static int
1702 find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
1703                         struct lock_list **target_entry)
1704 {
1705         int result;
1706
1707         debug_atomic_inc(nr_find_usage_forwards_checks);
1708
1709         result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
1710
1711         return result;
1712 }
1713
1714 /*
1715  * Find a node in the backwards-direction dependency sub-graph starting
1716  * at @root->class that matches @bit.
1717  *
1718  * Return 0 if such a node exists in the subgraph, and put that node
1719  * into *@target_entry.
1720  *
1721  * Return 1 otherwise and keep *@target_entry unchanged.
1722  * Return <0 on error.
1723  */
1724 static int
1725 find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
1726                         struct lock_list **target_entry)
1727 {
1728         int result;
1729
1730         debug_atomic_inc(nr_find_usage_backwards_checks);
1731
1732         result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
1733
1734         return result;
1735 }
1736
1737 static void print_lock_class_header(struct lock_class *class, int depth)
1738 {
1739         int bit;
1740
1741         printk("%*s->", depth, "");
1742         print_lock_name(class);
1743 #ifdef CONFIG_DEBUG_LOCKDEP
1744         printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1745 #endif
1746         printk(KERN_CONT " {\n");
1747
1748         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1749                 if (class->usage_mask & (1 << bit)) {
1750                         int len = depth;
1751
1752                         len += printk("%*s   %s", depth, "", usage_str[bit]);
1753                         len += printk(KERN_CONT " at:\n");
1754                         print_lock_trace(class->usage_traces + bit, len);
1755                 }
1756         }
1757         printk("%*s }\n", depth, "");
1758
1759         printk("%*s ... key      at: [<%px>] %pS\n",
1760                 depth, "", class->key, class->key);
1761 }
1762
1763 /*
1764  * printk the shortest lock dependencies from @start to @end in reverse order:
1765  */
1766 static void __used
1767 print_shortest_lock_dependencies(struct lock_list *leaf,
1768                                 struct lock_list *root)
1769 {
1770         struct lock_list *entry = leaf;
1771         int depth;
1772
1773         /*compute depth from generated tree by BFS*/
1774         depth = get_lock_depth(leaf);
1775
1776         do {
1777                 print_lock_class_header(entry->class, depth);
1778                 printk("%*s ... acquired at:\n", depth, "");
1779                 print_lock_trace(&entry->trace, 2);
1780                 printk("\n");
1781
1782                 if (depth == 0 && (entry != root)) {
1783                         printk("lockdep:%s bad path found in chain graph\n", __func__);
1784                         break;
1785                 }
1786
1787                 entry = get_lock_parent(entry);
1788                 depth--;
1789         } while (entry && (depth >= 0));
1790
1791         return;
1792 }
1793
1794 static void
1795 print_irq_lock_scenario(struct lock_list *safe_entry,
1796                         struct lock_list *unsafe_entry,
1797                         struct lock_class *prev_class,
1798                         struct lock_class *next_class)
1799 {
1800         struct lock_class *safe_class = safe_entry->class;
1801         struct lock_class *unsafe_class = unsafe_entry->class;
1802         struct lock_class *middle_class = prev_class;
1803
1804         if (middle_class == safe_class)
1805                 middle_class = next_class;
1806
1807         /*
1808          * A direct locking problem where unsafe_class lock is taken
1809          * directly by safe_class lock, then all we need to show
1810          * is the deadlock scenario, as it is obvious that the
1811          * unsafe lock is taken under the safe lock.
1812          *
1813          * But if there is a chain instead, where the safe lock takes
1814          * an intermediate lock (middle_class) where this lock is
1815          * not the same as the safe lock, then the lock chain is
1816          * used to describe the problem. Otherwise we would need
1817          * to show a different CPU case for each link in the chain
1818          * from the safe_class lock to the unsafe_class lock.
1819          */
1820         if (middle_class != unsafe_class) {
1821                 printk("Chain exists of:\n  ");
1822                 __print_lock_name(safe_class);
1823                 printk(KERN_CONT " --> ");
1824                 __print_lock_name(middle_class);
1825                 printk(KERN_CONT " --> ");
1826                 __print_lock_name(unsafe_class);
1827                 printk(KERN_CONT "\n\n");
1828         }
1829
1830         printk(" Possible interrupt unsafe locking scenario:\n\n");
1831         printk("       CPU0                    CPU1\n");
1832         printk("       ----                    ----\n");
1833         printk("  lock(");
1834         __print_lock_name(unsafe_class);
1835         printk(KERN_CONT ");\n");
1836         printk("                               local_irq_disable();\n");
1837         printk("                               lock(");
1838         __print_lock_name(safe_class);
1839         printk(KERN_CONT ");\n");
1840         printk("                               lock(");
1841         __print_lock_name(middle_class);
1842         printk(KERN_CONT ");\n");
1843         printk("  <Interrupt>\n");
1844         printk("    lock(");
1845         __print_lock_name(safe_class);
1846         printk(KERN_CONT ");\n");
1847         printk("\n *** DEADLOCK ***\n\n");
1848 }
1849
1850 static int
1851 print_bad_irq_dependency(struct task_struct *curr,
1852                          struct lock_list *prev_root,
1853                          struct lock_list *next_root,
1854                          struct lock_list *backwards_entry,
1855                          struct lock_list *forwards_entry,
1856                          struct held_lock *prev,
1857                          struct held_lock *next,
1858                          enum lock_usage_bit bit1,
1859                          enum lock_usage_bit bit2,
1860                          const char *irqclass)
1861 {
1862         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1863                 return 0;
1864
1865         pr_warn("\n");
1866         pr_warn("=====================================================\n");
1867         pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1868                 irqclass, irqclass);
1869         print_kernel_ident();
1870         pr_warn("-----------------------------------------------------\n");
1871         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1872                 curr->comm, task_pid_nr(curr),
1873                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1874                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1875                 curr->hardirqs_enabled,
1876                 curr->softirqs_enabled);
1877         print_lock(next);
1878
1879         pr_warn("\nand this task is already holding:\n");
1880         print_lock(prev);
1881         pr_warn("which would create a new lock dependency:\n");
1882         print_lock_name(hlock_class(prev));
1883         pr_cont(" ->");
1884         print_lock_name(hlock_class(next));
1885         pr_cont("\n");
1886
1887         pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1888                 irqclass);
1889         print_lock_name(backwards_entry->class);
1890         pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1891
1892         print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
1893
1894         pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1895         print_lock_name(forwards_entry->class);
1896         pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1897         pr_warn("...");
1898
1899         print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
1900
1901         pr_warn("\nother info that might help us debug this:\n\n");
1902         print_irq_lock_scenario(backwards_entry, forwards_entry,
1903                                 hlock_class(prev), hlock_class(next));
1904
1905         lockdep_print_held_locks(curr);
1906
1907         pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1908         if (!save_trace(&prev_root->trace))
1909                 return 0;
1910         print_shortest_lock_dependencies(backwards_entry, prev_root);
1911
1912         pr_warn("\nthe dependencies between the lock to be acquired");
1913         pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1914         if (!save_trace(&next_root->trace))
1915                 return 0;
1916         print_shortest_lock_dependencies(forwards_entry, next_root);
1917
1918         pr_warn("\nstack backtrace:\n");
1919         dump_stack();
1920
1921         return 0;
1922 }
1923
1924 static const char *state_names[] = {
1925 #define LOCKDEP_STATE(__STATE) \
1926         __stringify(__STATE),
1927 #include "lockdep_states.h"
1928 #undef LOCKDEP_STATE
1929 };
1930
1931 static const char *state_rnames[] = {
1932 #define LOCKDEP_STATE(__STATE) \
1933         __stringify(__STATE)"-READ",
1934 #include "lockdep_states.h"
1935 #undef LOCKDEP_STATE
1936 };
1937
1938 static inline const char *state_name(enum lock_usage_bit bit)
1939 {
1940         if (bit & LOCK_USAGE_READ_MASK)
1941                 return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
1942         else
1943                 return state_names[bit >> LOCK_USAGE_DIR_MASK];
1944 }
1945
1946 /*
1947  * The bit number is encoded like:
1948  *
1949  *  bit0: 0 exclusive, 1 read lock
1950  *  bit1: 0 used in irq, 1 irq enabled
1951  *  bit2-n: state
1952  */
1953 static int exclusive_bit(int new_bit)
1954 {
1955         int state = new_bit & LOCK_USAGE_STATE_MASK;
1956         int dir = new_bit & LOCK_USAGE_DIR_MASK;
1957
1958         /*
1959          * keep state, bit flip the direction and strip read.
1960          */
1961         return state | (dir ^ LOCK_USAGE_DIR_MASK);
1962 }
1963
1964 /*
1965  * Observe that when given a bitmask where each bitnr is encoded as above, a
1966  * right shift of the mask transforms the individual bitnrs as -1 and
1967  * conversely, a left shift transforms into +1 for the individual bitnrs.
1968  *
1969  * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
1970  * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
1971  * instead by subtracting the bit number by 2, or shifting the mask right by 2.
1972  *
1973  * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
1974  *
1975  * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
1976  * all bits set) and recompose with bitnr1 flipped.
1977  */
1978 static unsigned long invert_dir_mask(unsigned long mask)
1979 {
1980         unsigned long excl = 0;
1981
1982         /* Invert dir */
1983         excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
1984         excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
1985
1986         return excl;
1987 }
1988
1989 /*
1990  * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
1991  * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
1992  * And then mask out all bitnr0.
1993  */
1994 static unsigned long exclusive_mask(unsigned long mask)
1995 {
1996         unsigned long excl = invert_dir_mask(mask);
1997
1998         /* Strip read */
1999         excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2000         excl &= ~LOCKF_IRQ_READ;
2001
2002         return excl;
2003 }
2004
2005 /*
2006  * Retrieve the _possible_ original mask to which @mask is
2007  * exclusive. Ie: this is the opposite of exclusive_mask().
2008  * Note that 2 possible original bits can match an exclusive
2009  * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2010  * cleared. So both are returned for each exclusive bit.
2011  */
2012 static unsigned long original_mask(unsigned long mask)
2013 {
2014         unsigned long excl = invert_dir_mask(mask);
2015
2016         /* Include read in existing usages */
2017         excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2018
2019         return excl;
2020 }
2021
2022 /*
2023  * Find the first pair of bit match between an original
2024  * usage mask and an exclusive usage mask.
2025  */
2026 static int find_exclusive_match(unsigned long mask,
2027                                 unsigned long excl_mask,
2028                                 enum lock_usage_bit *bitp,
2029                                 enum lock_usage_bit *excl_bitp)
2030 {
2031         int bit, excl;
2032
2033         for_each_set_bit(bit, &mask, LOCK_USED) {
2034                 excl = exclusive_bit(bit);
2035                 if (excl_mask & lock_flag(excl)) {
2036                         *bitp = bit;
2037                         *excl_bitp = excl;
2038                         return 0;
2039                 }
2040         }
2041         return -1;
2042 }
2043
2044 /*
2045  * Prove that the new dependency does not connect a hardirq-safe(-read)
2046  * lock with a hardirq-unsafe lock - to achieve this we search
2047  * the backwards-subgraph starting at <prev>, and the
2048  * forwards-subgraph starting at <next>:
2049  */
2050 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2051                            struct held_lock *next)
2052 {
2053         unsigned long usage_mask = 0, forward_mask, backward_mask;
2054         enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2055         struct lock_list *uninitialized_var(target_entry1);
2056         struct lock_list *uninitialized_var(target_entry);
2057         struct lock_list this, that;
2058         int ret;
2059
2060         /*
2061          * Step 1: gather all hard/soft IRQs usages backward in an
2062          * accumulated usage mask.
2063          */
2064         this.parent = NULL;
2065         this.class = hlock_class(prev);
2066
2067         ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
2068         if (ret < 0)
2069                 return print_bfs_bug(ret);
2070
2071         usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2072         if (!usage_mask)
2073                 return 1;
2074
2075         /*
2076          * Step 2: find exclusive uses forward that match the previous
2077          * backward accumulated mask.
2078          */
2079         forward_mask = exclusive_mask(usage_mask);
2080
2081         that.parent = NULL;
2082         that.class = hlock_class(next);
2083
2084         ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2085         if (ret < 0)
2086                 return print_bfs_bug(ret);
2087         if (ret == 1)
2088                 return ret;
2089
2090         /*
2091          * Step 3: we found a bad match! Now retrieve a lock from the backward
2092          * list whose usage mask matches the exclusive usage mask from the
2093          * lock found on the forward list.
2094          */
2095         backward_mask = original_mask(target_entry1->class->usage_mask);
2096
2097         ret = find_usage_backwards(&this, backward_mask, &target_entry);
2098         if (ret < 0)
2099                 return print_bfs_bug(ret);
2100         if (DEBUG_LOCKS_WARN_ON(ret == 1))
2101                 return 1;
2102
2103         /*
2104          * Step 4: narrow down to a pair of incompatible usage bits
2105          * and report it.
2106          */
2107         ret = find_exclusive_match(target_entry->class->usage_mask,
2108                                    target_entry1->class->usage_mask,
2109                                    &backward_bit, &forward_bit);
2110         if (DEBUG_LOCKS_WARN_ON(ret == -1))
2111                 return 1;
2112
2113         return print_bad_irq_dependency(curr, &this, &that,
2114                         target_entry, target_entry1,
2115                         prev, next,
2116                         backward_bit, forward_bit,
2117                         state_name(backward_bit));
2118 }
2119
2120 static void inc_chains(void)
2121 {
2122         if (current->hardirq_context)
2123                 nr_hardirq_chains++;
2124         else {
2125                 if (current->softirq_context)
2126                         nr_softirq_chains++;
2127                 else
2128                         nr_process_chains++;
2129         }
2130 }
2131
2132 #else
2133
2134 static inline int check_irq_usage(struct task_struct *curr,
2135                                   struct held_lock *prev, struct held_lock *next)
2136 {
2137         return 1;
2138 }
2139
2140 static inline void inc_chains(void)
2141 {
2142         nr_process_chains++;
2143 }
2144
2145 #endif
2146
2147 static void
2148 print_deadlock_scenario(struct held_lock *nxt,
2149                              struct held_lock *prv)
2150 {
2151         struct lock_class *next = hlock_class(nxt);
2152         struct lock_class *prev = hlock_class(prv);
2153
2154         printk(" Possible unsafe locking scenario:\n\n");
2155         printk("       CPU0\n");
2156         printk("       ----\n");
2157         printk("  lock(");
2158         __print_lock_name(prev);
2159         printk(KERN_CONT ");\n");
2160         printk("  lock(");
2161         __print_lock_name(next);
2162         printk(KERN_CONT ");\n");
2163         printk("\n *** DEADLOCK ***\n\n");
2164         printk(" May be due to missing lock nesting notation\n\n");
2165 }
2166
2167 static int
2168 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2169                    struct held_lock *next)
2170 {
2171         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2172                 return 0;
2173
2174         pr_warn("\n");
2175         pr_warn("============================================\n");
2176         pr_warn("WARNING: possible recursive locking detected\n");
2177         print_kernel_ident();
2178         pr_warn("--------------------------------------------\n");
2179         pr_warn("%s/%d is trying to acquire lock:\n",
2180                 curr->comm, task_pid_nr(curr));
2181         print_lock(next);
2182         pr_warn("\nbut task is already holding lock:\n");
2183         print_lock(prev);
2184
2185         pr_warn("\nother info that might help us debug this:\n");
2186         print_deadlock_scenario(next, prev);
2187         lockdep_print_held_locks(curr);
2188
2189         pr_warn("\nstack backtrace:\n");
2190         dump_stack();
2191
2192         return 0;
2193 }
2194
2195 /*
2196  * Check whether we are holding such a class already.
2197  *
2198  * (Note that this has to be done separately, because the graph cannot
2199  * detect such classes of deadlocks.)
2200  *
2201  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2202  */
2203 static int
2204 check_deadlock(struct task_struct *curr, struct held_lock *next,
2205                struct lockdep_map *next_instance, int read)
2206 {
2207         struct held_lock *prev;
2208         struct held_lock *nest = NULL;
2209         int i;
2210
2211         for (i = 0; i < curr->lockdep_depth; i++) {
2212                 prev = curr->held_locks + i;
2213
2214                 if (prev->instance == next->nest_lock)
2215                         nest = prev;
2216
2217                 if (hlock_class(prev) != hlock_class(next))
2218                         continue;
2219
2220                 /*
2221                  * Allow read-after-read recursion of the same
2222                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
2223                  */
2224                 if ((read == 2) && prev->read)
2225                         return 2;
2226
2227                 /*
2228                  * We're holding the nest_lock, which serializes this lock's
2229                  * nesting behaviour.
2230                  */
2231                 if (nest)
2232                         return 2;
2233
2234                 return print_deadlock_bug(curr, prev, next);
2235         }
2236         return 1;
2237 }
2238
2239 /*
2240  * There was a chain-cache miss, and we are about to add a new dependency
2241  * to a previous lock. We recursively validate the following rules:
2242  *
2243  *  - would the adding of the <prev> -> <next> dependency create a
2244  *    circular dependency in the graph? [== circular deadlock]
2245  *
2246  *  - does the new prev->next dependency connect any hardirq-safe lock
2247  *    (in the full backwards-subgraph starting at <prev>) with any
2248  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
2249  *    <next>)? [== illegal lock inversion with hardirq contexts]
2250  *
2251  *  - does the new prev->next dependency connect any softirq-safe lock
2252  *    (in the full backwards-subgraph starting at <prev>) with any
2253  *    softirq-unsafe lock (in the full forwards-subgraph starting at
2254  *    <next>)? [== illegal lock inversion with softirq contexts]
2255  *
2256  * any of these scenarios could lead to a deadlock.
2257  *
2258  * Then if all the validations pass, we add the forwards and backwards
2259  * dependency.
2260  */
2261 static int
2262 check_prev_add(struct task_struct *curr, struct held_lock *prev,
2263                struct held_lock *next, int distance, struct lock_trace *trace)
2264 {
2265         struct lock_list *uninitialized_var(target_entry);
2266         struct lock_list *entry;
2267         struct lock_list this;
2268         int ret;
2269
2270         if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2271                 /*
2272                  * The warning statements below may trigger a use-after-free
2273                  * of the class name. It is better to trigger a use-after free
2274                  * and to have the class name most of the time instead of not
2275                  * having the class name available.
2276                  */
2277                 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2278                           "Detected use-after-free of lock class %px/%s\n",
2279                           hlock_class(prev),
2280                           hlock_class(prev)->name);
2281                 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2282                           "Detected use-after-free of lock class %px/%s\n",
2283                           hlock_class(next),
2284                           hlock_class(next)->name);
2285                 return 2;
2286         }
2287
2288         /*
2289          * Prove that the new <prev> -> <next> dependency would not
2290          * create a circular dependency in the graph. (We do this by
2291          * forward-recursing into the graph starting at <next>, and
2292          * checking whether we can reach <prev>.)
2293          *
2294          * We are using global variables to control the recursion, to
2295          * keep the stackframe size of the recursive functions low:
2296          */
2297         this.class = hlock_class(next);
2298         this.parent = NULL;
2299         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
2300         if (unlikely(!ret)) {
2301                 if (!trace->nr_entries) {
2302                         /*
2303                          * If save_trace fails here, the printing might
2304                          * trigger a WARN but because of the !nr_entries it
2305                          * should not do bad things.
2306                          */
2307                         save_trace(trace);
2308                 }
2309                 return print_circular_bug(&this, target_entry, next, prev);
2310         }
2311         else if (unlikely(ret < 0))
2312                 return print_bfs_bug(ret);
2313
2314         if (!check_irq_usage(curr, prev, next))
2315                 return 0;
2316
2317         /*
2318          * For recursive read-locks we do all the dependency checks,
2319          * but we dont store read-triggered dependencies (only
2320          * write-triggered dependencies). This ensures that only the
2321          * write-side dependencies matter, and that if for example a
2322          * write-lock never takes any other locks, then the reads are
2323          * equivalent to a NOP.
2324          */
2325         if (next->read == 2 || prev->read == 2)
2326                 return 1;
2327         /*
2328          * Is the <prev> -> <next> dependency already present?
2329          *
2330          * (this may occur even though this is a new chain: consider
2331          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2332          *  chains - the second one will be new, but L1 already has
2333          *  L2 added to its dependency list, due to the first chain.)
2334          */
2335         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2336                 if (entry->class == hlock_class(next)) {
2337                         if (distance == 1)
2338                                 entry->distance = 1;
2339                         return 1;
2340                 }
2341         }
2342
2343         /*
2344          * Is the <prev> -> <next> link redundant?
2345          */
2346         this.class = hlock_class(prev);
2347         this.parent = NULL;
2348         ret = check_redundant(&this, hlock_class(next), &target_entry);
2349         if (!ret) {
2350                 debug_atomic_inc(nr_redundant);
2351                 return 2;
2352         }
2353         if (ret < 0)
2354                 return print_bfs_bug(ret);
2355
2356
2357         if (!trace->nr_entries && !save_trace(trace))
2358                 return 0;
2359
2360         /*
2361          * Ok, all validations passed, add the new lock
2362          * to the previous lock's dependency list:
2363          */
2364         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
2365                                &hlock_class(prev)->locks_after,
2366                                next->acquire_ip, distance, trace);
2367
2368         if (!ret)
2369                 return 0;
2370
2371         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
2372                                &hlock_class(next)->locks_before,
2373                                next->acquire_ip, distance, trace);
2374         if (!ret)
2375                 return 0;
2376
2377         return 2;
2378 }
2379
2380 /*
2381  * Add the dependency to all directly-previous locks that are 'relevant'.
2382  * The ones that are relevant are (in increasing distance from curr):
2383  * all consecutive trylock entries and the final non-trylock entry - or
2384  * the end of this context's lock-chain - whichever comes first.
2385  */
2386 static int
2387 check_prevs_add(struct task_struct *curr, struct held_lock *next)
2388 {
2389         struct lock_trace trace = { .nr_entries = 0 };
2390         int depth = curr->lockdep_depth;
2391         struct held_lock *hlock;
2392
2393         /*
2394          * Debugging checks.
2395          *
2396          * Depth must not be zero for a non-head lock:
2397          */
2398         if (!depth)
2399                 goto out_bug;
2400         /*
2401          * At least two relevant locks must exist for this
2402          * to be a head:
2403          */
2404         if (curr->held_locks[depth].irq_context !=
2405                         curr->held_locks[depth-1].irq_context)
2406                 goto out_bug;
2407
2408         for (;;) {
2409                 int distance = curr->lockdep_depth - depth + 1;
2410                 hlock = curr->held_locks + depth - 1;
2411
2412                 /*
2413                  * Only non-recursive-read entries get new dependencies
2414                  * added:
2415                  */
2416                 if (hlock->read != 2 && hlock->check) {
2417                         int ret = check_prev_add(curr, hlock, next, distance,
2418                                                  &trace);
2419                         if (!ret)
2420                                 return 0;
2421
2422                         /*
2423                          * Stop after the first non-trylock entry,
2424                          * as non-trylock entries have added their
2425                          * own direct dependencies already, so this
2426                          * lock is connected to them indirectly:
2427                          */
2428                         if (!hlock->trylock)
2429                                 break;
2430                 }
2431
2432                 depth--;
2433                 /*
2434                  * End of lock-stack?
2435                  */
2436                 if (!depth)
2437                         break;
2438                 /*
2439                  * Stop the search if we cross into another context:
2440                  */
2441                 if (curr->held_locks[depth].irq_context !=
2442                                 curr->held_locks[depth-1].irq_context)
2443                         break;
2444         }
2445         return 1;
2446 out_bug:
2447         if (!debug_locks_off_graph_unlock())
2448                 return 0;
2449
2450         /*
2451          * Clearly we all shouldn't be here, but since we made it we
2452          * can reliable say we messed up our state. See the above two
2453          * gotos for reasons why we could possibly end up here.
2454          */
2455         WARN_ON(1);
2456
2457         return 0;
2458 }
2459
2460 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2461 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
2462 int nr_chain_hlocks;
2463 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2464
2465 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2466 {
2467         return lock_classes + chain_hlocks[chain->base + i];
2468 }
2469
2470 /*
2471  * Returns the index of the first held_lock of the current chain
2472  */
2473 static inline int get_first_held_lock(struct task_struct *curr,
2474                                         struct held_lock *hlock)
2475 {
2476         int i;
2477         struct held_lock *hlock_curr;
2478
2479         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2480                 hlock_curr = curr->held_locks + i;
2481                 if (hlock_curr->irq_context != hlock->irq_context)
2482                         break;
2483
2484         }
2485
2486         return ++i;
2487 }
2488
2489 #ifdef CONFIG_DEBUG_LOCKDEP
2490 /*
2491  * Returns the next chain_key iteration
2492  */
2493 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2494 {
2495         u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2496
2497         printk(" class_idx:%d -> chain_key:%016Lx",
2498                 class_idx,
2499                 (unsigned long long)new_chain_key);
2500         return new_chain_key;
2501 }
2502
2503 static void
2504 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2505 {
2506         struct held_lock *hlock;
2507         u64 chain_key = 0;
2508         int depth = curr->lockdep_depth;
2509         int i;
2510
2511         printk("depth: %u\n", depth + 1);
2512         for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2513                 hlock = curr->held_locks + i;
2514                 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2515
2516                 print_lock(hlock);
2517         }
2518
2519         print_chain_key_iteration(hlock_next->class_idx, chain_key);
2520         print_lock(hlock_next);
2521 }
2522
2523 static void print_chain_keys_chain(struct lock_chain *chain)
2524 {
2525         int i;
2526         u64 chain_key = 0;
2527         int class_id;
2528
2529         printk("depth: %u\n", chain->depth);
2530         for (i = 0; i < chain->depth; i++) {
2531                 class_id = chain_hlocks[chain->base + i];
2532                 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2533
2534                 print_lock_name(lock_classes + class_id);
2535                 printk("\n");
2536         }
2537 }
2538
2539 static void print_collision(struct task_struct *curr,
2540                         struct held_lock *hlock_next,
2541                         struct lock_chain *chain)
2542 {
2543         pr_warn("\n");
2544         pr_warn("============================\n");
2545         pr_warn("WARNING: chain_key collision\n");
2546         print_kernel_ident();
2547         pr_warn("----------------------------\n");
2548         pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2549         pr_warn("Hash chain already cached but the contents don't match!\n");
2550
2551         pr_warn("Held locks:");
2552         print_chain_keys_held_locks(curr, hlock_next);
2553
2554         pr_warn("Locks in cached chain:");
2555         print_chain_keys_chain(chain);
2556
2557         pr_warn("\nstack backtrace:\n");
2558         dump_stack();
2559 }
2560 #endif
2561
2562 /*
2563  * Checks whether the chain and the current held locks are consistent
2564  * in depth and also in content. If they are not it most likely means
2565  * that there was a collision during the calculation of the chain_key.
2566  * Returns: 0 not passed, 1 passed
2567  */
2568 static int check_no_collision(struct task_struct *curr,
2569                         struct held_lock *hlock,
2570                         struct lock_chain *chain)
2571 {
2572 #ifdef CONFIG_DEBUG_LOCKDEP
2573         int i, j, id;
2574
2575         i = get_first_held_lock(curr, hlock);
2576
2577         if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2578                 print_collision(curr, hlock, chain);
2579                 return 0;
2580         }
2581
2582         for (j = 0; j < chain->depth - 1; j++, i++) {
2583                 id = curr->held_locks[i].class_idx - 1;
2584
2585                 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2586                         print_collision(curr, hlock, chain);
2587                         return 0;
2588                 }
2589         }
2590 #endif
2591         return 1;
2592 }
2593
2594 /*
2595  * Given an index that is >= -1, return the index of the next lock chain.
2596  * Return -2 if there is no next lock chain.
2597  */
2598 long lockdep_next_lockchain(long i)
2599 {
2600         i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
2601         return i < ARRAY_SIZE(lock_chains) ? i : -2;
2602 }
2603
2604 unsigned long lock_chain_count(void)
2605 {
2606         return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
2607 }
2608
2609 /* Must be called with the graph lock held. */
2610 static struct lock_chain *alloc_lock_chain(void)
2611 {
2612         int idx = find_first_zero_bit(lock_chains_in_use,
2613                                       ARRAY_SIZE(lock_chains));
2614
2615         if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
2616                 return NULL;
2617         __set_bit(idx, lock_chains_in_use);
2618         return lock_chains + idx;
2619 }
2620
2621 /*
2622  * Adds a dependency chain into chain hashtable. And must be called with
2623  * graph_lock held.
2624  *
2625  * Return 0 if fail, and graph_lock is released.
2626  * Return 1 if succeed, with graph_lock held.
2627  */
2628 static inline int add_chain_cache(struct task_struct *curr,
2629                                   struct held_lock *hlock,
2630                                   u64 chain_key)
2631 {
2632         struct lock_class *class = hlock_class(hlock);
2633         struct hlist_head *hash_head = chainhashentry(chain_key);
2634         struct lock_chain *chain;
2635         int i, j;
2636
2637         /*
2638          * The caller must hold the graph lock, ensure we've got IRQs
2639          * disabled to make this an IRQ-safe lock.. for recursion reasons
2640          * lockdep won't complain about its own locking errors.
2641          */
2642         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2643                 return 0;
2644
2645         chain = alloc_lock_chain();
2646         if (!chain) {
2647                 if (!debug_locks_off_graph_unlock())
2648                         return 0;
2649
2650                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2651                 dump_stack();
2652                 return 0;
2653         }
2654         chain->chain_key = chain_key;
2655         chain->irq_context = hlock->irq_context;
2656         i = get_first_held_lock(curr, hlock);
2657         chain->depth = curr->lockdep_depth + 1 - i;
2658
2659         BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2660         BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2661         BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2662
2663         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2664                 chain->base = nr_chain_hlocks;
2665                 for (j = 0; j < chain->depth - 1; j++, i++) {
2666                         int lock_id = curr->held_locks[i].class_idx - 1;
2667                         chain_hlocks[chain->base + j] = lock_id;
2668                 }
2669                 chain_hlocks[chain->base + j] = class - lock_classes;
2670                 nr_chain_hlocks += chain->depth;
2671         } else {
2672                 if (!debug_locks_off_graph_unlock())
2673                         return 0;
2674
2675                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2676                 dump_stack();
2677                 return 0;
2678         }
2679
2680         hlist_add_head_rcu(&chain->entry, hash_head);
2681         debug_atomic_inc(chain_lookup_misses);
2682         inc_chains();
2683
2684         return 1;
2685 }
2686
2687 /*
2688  * Look up a dependency chain. Must be called with either the graph lock or
2689  * the RCU read lock held.
2690  */
2691 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2692 {
2693         struct hlist_head *hash_head = chainhashentry(chain_key);
2694         struct lock_chain *chain;
2695
2696         hlist_for_each_entry_rcu(chain, hash_head, entry) {
2697                 if (READ_ONCE(chain->chain_key) == chain_key) {
2698                         debug_atomic_inc(chain_lookup_hits);
2699                         return chain;
2700                 }
2701         }
2702         return NULL;
2703 }
2704
2705 /*
2706  * If the key is not present yet in dependency chain cache then
2707  * add it and return 1 - in this case the new dependency chain is
2708  * validated. If the key is already hashed, return 0.
2709  * (On return with 1 graph_lock is held.)
2710  */
2711 static inline int lookup_chain_cache_add(struct task_struct *curr,
2712                                          struct held_lock *hlock,
2713                                          u64 chain_key)
2714 {
2715         struct lock_class *class = hlock_class(hlock);
2716         struct lock_chain *chain = lookup_chain_cache(chain_key);
2717
2718         if (chain) {
2719 cache_hit:
2720                 if (!check_no_collision(curr, hlock, chain))
2721                         return 0;
2722
2723                 if (very_verbose(class)) {
2724                         printk("\nhash chain already cached, key: "
2725                                         "%016Lx tail class: [%px] %s\n",
2726                                         (unsigned long long)chain_key,
2727                                         class->key, class->name);
2728                 }
2729
2730                 return 0;
2731         }
2732
2733         if (very_verbose(class)) {
2734                 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2735                         (unsigned long long)chain_key, class->key, class->name);
2736         }
2737
2738         if (!graph_lock())
2739                 return 0;
2740
2741         /*
2742          * We have to walk the chain again locked - to avoid duplicates:
2743          */
2744         chain = lookup_chain_cache(chain_key);
2745         if (chain) {
2746                 graph_unlock();
2747                 goto cache_hit;
2748         }
2749
2750         if (!add_chain_cache(curr, hlock, chain_key))
2751                 return 0;
2752
2753         return 1;
2754 }
2755
2756 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2757                 struct held_lock *hlock, int chain_head, u64 chain_key)
2758 {
2759         /*
2760          * Trylock needs to maintain the stack of held locks, but it
2761          * does not add new dependencies, because trylock can be done
2762          * in any order.
2763          *
2764          * We look up the chain_key and do the O(N^2) check and update of
2765          * the dependencies only if this is a new dependency chain.
2766          * (If lookup_chain_cache_add() return with 1 it acquires
2767          * graph_lock for us)
2768          */
2769         if (!hlock->trylock && hlock->check &&
2770             lookup_chain_cache_add(curr, hlock, chain_key)) {
2771                 /*
2772                  * Check whether last held lock:
2773                  *
2774                  * - is irq-safe, if this lock is irq-unsafe
2775                  * - is softirq-safe, if this lock is hardirq-unsafe
2776                  *
2777                  * And check whether the new lock's dependency graph
2778                  * could lead back to the previous lock.
2779                  *
2780                  * any of these scenarios could lead to a deadlock. If
2781                  * All validations
2782                  */
2783                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2784
2785                 if (!ret)
2786                         return 0;
2787                 /*
2788                  * Mark recursive read, as we jump over it when
2789                  * building dependencies (just like we jump over
2790                  * trylock entries):
2791                  */
2792                 if (ret == 2)
2793                         hlock->read = 2;
2794                 /*
2795                  * Add dependency only if this lock is not the head
2796                  * of the chain, and if it's not a secondary read-lock:
2797                  */
2798                 if (!chain_head && ret != 2) {
2799                         if (!check_prevs_add(curr, hlock))
2800                                 return 0;
2801                 }
2802
2803                 graph_unlock();
2804         } else {
2805                 /* after lookup_chain_cache_add(): */
2806                 if (unlikely(!debug_locks))
2807                         return 0;
2808         }
2809
2810         return 1;
2811 }
2812 #else
2813 static inline int validate_chain(struct task_struct *curr,
2814                 struct lockdep_map *lock, struct held_lock *hlock,
2815                 int chain_head, u64 chain_key)
2816 {
2817         return 1;
2818 }
2819
2820 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
2821 {
2822 }
2823 #endif
2824
2825 /*
2826  * We are building curr_chain_key incrementally, so double-check
2827  * it from scratch, to make sure that it's done correctly:
2828  */
2829 static void check_chain_key(struct task_struct *curr)
2830 {
2831 #ifdef CONFIG_DEBUG_LOCKDEP
2832         struct held_lock *hlock, *prev_hlock = NULL;
2833         unsigned int i;
2834         u64 chain_key = 0;
2835
2836         for (i = 0; i < curr->lockdep_depth; i++) {
2837                 hlock = curr->held_locks + i;
2838                 if (chain_key != hlock->prev_chain_key) {
2839                         debug_locks_off();
2840                         /*
2841                          * We got mighty confused, our chain keys don't match
2842                          * with what we expect, someone trample on our task state?
2843                          */
2844                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2845                                 curr->lockdep_depth, i,
2846                                 (unsigned long long)chain_key,
2847                                 (unsigned long long)hlock->prev_chain_key);
2848                         return;
2849                 }
2850                 /*
2851                  * Whoops ran out of static storage again?
2852                  */
2853                 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2854                         return;
2855
2856                 if (prev_hlock && (prev_hlock->irq_context !=
2857                                                         hlock->irq_context))
2858                         chain_key = 0;
2859                 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2860                 prev_hlock = hlock;
2861         }
2862         if (chain_key != curr->curr_chain_key) {
2863                 debug_locks_off();
2864                 /*
2865                  * More smoking hash instead of calculating it, damn see these
2866                  * numbers float.. I bet that a pink elephant stepped on my memory.
2867                  */
2868                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2869                         curr->lockdep_depth, i,
2870                         (unsigned long long)chain_key,
2871                         (unsigned long long)curr->curr_chain_key);
2872         }
2873 #endif
2874 }
2875
2876 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2877                      enum lock_usage_bit new_bit);
2878
2879 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2880
2881
2882 static void
2883 print_usage_bug_scenario(struct held_lock *lock)
2884 {
2885         struct lock_class *class = hlock_class(lock);
2886
2887         printk(" Possible unsafe locking scenario:\n\n");
2888         printk("       CPU0\n");
2889         printk("       ----\n");
2890         printk("  lock(");
2891         __print_lock_name(class);
2892         printk(KERN_CONT ");\n");
2893         printk("  <Interrupt>\n");
2894         printk("    lock(");
2895         __print_lock_name(class);
2896         printk(KERN_CONT ");\n");
2897         printk("\n *** DEADLOCK ***\n\n");
2898 }
2899
2900 static int
2901 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2902                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2903 {
2904         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2905                 return 0;
2906
2907         pr_warn("\n");
2908         pr_warn("================================\n");
2909         pr_warn("WARNING: inconsistent lock state\n");
2910         print_kernel_ident();
2911         pr_warn("--------------------------------\n");
2912
2913         pr_warn("inconsistent {%s} -> {%s} usage.\n",
2914                 usage_str[prev_bit], usage_str[new_bit]);
2915
2916         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2917                 curr->comm, task_pid_nr(curr),
2918                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2919                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2920                 trace_hardirqs_enabled(curr),
2921                 trace_softirqs_enabled(curr));
2922         print_lock(this);
2923
2924         pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2925         print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2926
2927         print_irqtrace_events(curr);
2928         pr_warn("\nother info that might help us debug this:\n");
2929         print_usage_bug_scenario(this);
2930
2931         lockdep_print_held_locks(curr);
2932
2933         pr_warn("\nstack backtrace:\n");
2934         dump_stack();
2935
2936         return 0;
2937 }
2938
2939 /*
2940  * Print out an error if an invalid bit is set:
2941  */
2942 static inline int
2943 valid_state(struct task_struct *curr, struct held_lock *this,
2944             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2945 {
2946         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2947                 return print_usage_bug(curr, this, bad_bit, new_bit);
2948         return 1;
2949 }
2950
2951
2952 /*
2953  * print irq inversion bug:
2954  */
2955 static int
2956 print_irq_inversion_bug(struct task_struct *curr,
2957                         struct lock_list *root, struct lock_list *other,
2958                         struct held_lock *this, int forwards,
2959                         const char *irqclass)
2960 {
2961         struct lock_list *entry = other;
2962         struct lock_list *middle = NULL;
2963         int depth;
2964
2965         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2966                 return 0;
2967
2968         pr_warn("\n");
2969         pr_warn("========================================================\n");
2970         pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2971         print_kernel_ident();
2972         pr_warn("--------------------------------------------------------\n");
2973         pr_warn("%s/%d just changed the state of lock:\n",
2974                 curr->comm, task_pid_nr(curr));
2975         print_lock(this);
2976         if (forwards)
2977                 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2978         else
2979                 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2980         print_lock_name(other->class);
2981         pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2982
2983         pr_warn("\nother info that might help us debug this:\n");
2984
2985         /* Find a middle lock (if one exists) */
2986         depth = get_lock_depth(other);
2987         do {
2988                 if (depth == 0 && (entry != root)) {
2989                         pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2990                         break;
2991                 }
2992                 middle = entry;
2993                 entry = get_lock_parent(entry);
2994                 depth--;
2995         } while (entry && entry != root && (depth >= 0));
2996         if (forwards)
2997                 print_irq_lock_scenario(root, other,
2998                         middle ? middle->class : root->class, other->class);
2999         else
3000                 print_irq_lock_scenario(other, root,
3001                         middle ? middle->class : other->class, root->class);
3002
3003         lockdep_print_held_locks(curr);
3004
3005         pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3006         if (!save_trace(&root->trace))
3007                 return 0;
3008         print_shortest_lock_dependencies(other, root);
3009
3010         pr_warn("\nstack backtrace:\n");
3011         dump_stack();
3012
3013         return 0;
3014 }
3015
3016 /*
3017  * Prove that in the forwards-direction subgraph starting at <this>
3018  * there is no lock matching <mask>:
3019  */
3020 static int
3021 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
3022                      enum lock_usage_bit bit, const char *irqclass)
3023 {
3024         int ret;
3025         struct lock_list root;
3026         struct lock_list *uninitialized_var(target_entry);
3027
3028         root.parent = NULL;
3029         root.class = hlock_class(this);
3030         ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
3031         if (ret < 0)
3032                 return print_bfs_bug(ret);
3033         if (ret == 1)
3034                 return ret;
3035
3036         return print_irq_inversion_bug(curr, &root, target_entry,
3037                                         this, 1, irqclass);
3038 }
3039
3040 /*
3041  * Prove that in the backwards-direction subgraph starting at <this>
3042  * there is no lock matching <mask>:
3043  */
3044 static int
3045 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
3046                       enum lock_usage_bit bit, const char *irqclass)
3047 {
3048         int ret;
3049         struct lock_list root;
3050         struct lock_list *uninitialized_var(target_entry);
3051
3052         root.parent = NULL;
3053         root.class = hlock_class(this);
3054         ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
3055         if (ret < 0)
3056                 return print_bfs_bug(ret);
3057         if (ret == 1)
3058                 return ret;
3059
3060         return print_irq_inversion_bug(curr, &root, target_entry,
3061                                         this, 0, irqclass);
3062 }
3063
3064 void print_irqtrace_events(struct task_struct *curr)
3065 {
3066         printk("irq event stamp: %u\n", curr->irq_events);
3067         printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
3068                 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
3069                 (void *)curr->hardirq_enable_ip);
3070         printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3071                 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
3072                 (void *)curr->hardirq_disable_ip);
3073         printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
3074                 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
3075                 (void *)curr->softirq_enable_ip);
3076         printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3077                 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
3078                 (void *)curr->softirq_disable_ip);
3079 }
3080
3081 static int HARDIRQ_verbose(struct lock_class *class)
3082 {
3083 #if HARDIRQ_VERBOSE
3084         return class_filter(class);
3085 #endif
3086         return 0;
3087 }
3088
3089 static int SOFTIRQ_verbose(struct lock_class *class)
3090 {
3091 #if SOFTIRQ_VERBOSE
3092         return class_filter(class);
3093 #endif
3094         return 0;
3095 }
3096
3097 #define STRICT_READ_CHECKS      1
3098
3099 static int (*state_verbose_f[])(struct lock_class *class) = {
3100 #define LOCKDEP_STATE(__STATE) \
3101         __STATE##_verbose,
3102 #include "lockdep_states.h"
3103 #undef LOCKDEP_STATE
3104 };
3105
3106 static inline int state_verbose(enum lock_usage_bit bit,
3107                                 struct lock_class *class)
3108 {
3109         return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
3110 }
3111
3112 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
3113                              enum lock_usage_bit bit, const char *name);
3114
3115 static int
3116 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3117                 enum lock_usage_bit new_bit)
3118 {
3119         int excl_bit = exclusive_bit(new_bit);
3120         int read = new_bit & LOCK_USAGE_READ_MASK;
3121         int dir = new_bit & LOCK_USAGE_DIR_MASK;
3122
3123         /*
3124          * mark USED_IN has to look forwards -- to ensure no dependency
3125          * has ENABLED state, which would allow recursion deadlocks.
3126          *
3127          * mark ENABLED has to look backwards -- to ensure no dependee
3128          * has USED_IN state, which, again, would allow  recursion deadlocks.
3129          */
3130         check_usage_f usage = dir ?
3131                 check_usage_backwards : check_usage_forwards;
3132
3133         /*
3134          * Validate that this particular lock does not have conflicting
3135          * usage states.
3136          */
3137         if (!valid_state(curr, this, new_bit, excl_bit))
3138                 return 0;
3139
3140         /*
3141          * Validate that the lock dependencies don't have conflicting usage
3142          * states.
3143          */
3144         if ((!read || !dir || STRICT_READ_CHECKS) &&
3145                         !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
3146                 return 0;
3147
3148         /*
3149          * Check for read in write conflicts
3150          */
3151         if (!read) {
3152                 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
3153                         return 0;
3154
3155                 if (STRICT_READ_CHECKS &&
3156                         !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
3157                                 state_name(new_bit + LOCK_USAGE_READ_MASK)))
3158                         return 0;
3159         }
3160
3161         if (state_verbose(new_bit, hlock_class(this)))
3162                 return 2;
3163
3164         return 1;
3165 }
3166
3167 /*
3168  * Mark all held locks with a usage bit:
3169  */
3170 static int
3171 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
3172 {
3173         struct held_lock *hlock;
3174         int i;
3175
3176         for (i = 0; i < curr->lockdep_depth; i++) {
3177                 enum lock_usage_bit hlock_bit = base_bit;
3178                 hlock = curr->held_locks + i;
3179
3180                 if (hlock->read)
3181                         hlock_bit += LOCK_USAGE_READ_MASK;
3182
3183                 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
3184
3185                 if (!hlock->check)
3186                         continue;
3187
3188                 if (!mark_lock(curr, hlock, hlock_bit))
3189                         return 0;
3190         }
3191
3192         return 1;
3193 }
3194
3195 /*
3196  * Hardirqs will be enabled:
3197  */
3198 static void __trace_hardirqs_on_caller(unsigned long ip)
3199 {
3200         struct task_struct *curr = current;
3201
3202         /* we'll do an OFF -> ON transition: */
3203         curr->hardirqs_enabled = 1;
3204
3205         /*
3206          * We are going to turn hardirqs on, so set the
3207          * usage bit for all held locks:
3208          */
3209         if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
3210                 return;
3211         /*
3212          * If we have softirqs enabled, then set the usage
3213          * bit for all held locks. (disabled hardirqs prevented
3214          * this bit from being set before)
3215          */
3216         if (curr->softirqs_enabled)
3217                 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
3218                         return;
3219
3220         curr->hardirq_enable_ip = ip;
3221         curr->hardirq_enable_event = ++curr->irq_events;
3222         debug_atomic_inc(hardirqs_on_events);
3223 }
3224
3225 void lockdep_hardirqs_on(unsigned long ip)
3226 {
3227         if (unlikely(!debug_locks || current->lockdep_recursion))
3228                 return;
3229
3230         if (unlikely(current->hardirqs_enabled)) {
3231                 /*
3232                  * Neither irq nor preemption are disabled here
3233                  * so this is racy by nature but losing one hit
3234                  * in a stat is not a big deal.
3235                  */
3236                 __debug_atomic_inc(redundant_hardirqs_on);
3237                 return;
3238         }
3239
3240         /*
3241          * We're enabling irqs and according to our state above irqs weren't
3242          * already enabled, yet we find the hardware thinks they are in fact
3243          * enabled.. someone messed up their IRQ state tracing.
3244          */
3245         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3246                 return;
3247
3248         /*
3249          * See the fine text that goes along with this variable definition.
3250          */
3251         if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
3252                 return;
3253
3254         /*
3255          * Can't allow enabling interrupts while in an interrupt handler,
3256          * that's general bad form and such. Recursion, limited stack etc..
3257          */
3258         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3259                 return;
3260
3261         current->lockdep_recursion = 1;
3262         __trace_hardirqs_on_caller(ip);
3263         current->lockdep_recursion = 0;
3264 }
3265 NOKPROBE_SYMBOL(lockdep_hardirqs_on);
3266
3267 /*
3268  * Hardirqs were disabled:
3269  */
3270 void lockdep_hardirqs_off(unsigned long ip)
3271 {
3272         struct task_struct *curr = current;
3273
3274         if (unlikely(!debug_locks || current->lockdep_recursion))
3275                 return;
3276
3277         /*
3278          * So we're supposed to get called after you mask local IRQs, but for
3279          * some reason the hardware doesn't quite think you did a proper job.
3280          */
3281         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3282                 return;
3283
3284         if (curr->hardirqs_enabled) {
3285                 /*
3286                  * We have done an ON -> OFF transition:
3287                  */
3288                 curr->hardirqs_enabled = 0;
3289                 curr->hardirq_disable_ip = ip;
3290                 curr->hardirq_disable_event = ++curr->irq_events;
3291                 debug_atomic_inc(hardirqs_off_events);
3292         } else
3293                 debug_atomic_inc(redundant_hardirqs_off);
3294 }
3295 NOKPROBE_SYMBOL(lockdep_hardirqs_off);
3296
3297 /*
3298  * Softirqs will be enabled:
3299  */
3300 void trace_softirqs_on(unsigned long ip)
3301 {
3302         struct task_struct *curr = current;
3303
3304         if (unlikely(!debug_locks || current->lockdep_recursion))
3305                 return;
3306
3307         /*
3308          * We fancy IRQs being disabled here, see softirq.c, avoids
3309          * funny state and nesting things.
3310          */
3311         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3312                 return;
3313
3314         if (curr->softirqs_enabled) {
3315                 debug_atomic_inc(redundant_softirqs_on);
3316                 return;
3317         }
3318
3319         current->lockdep_recursion = 1;
3320         /*
3321          * We'll do an OFF -> ON transition:
3322          */
3323         curr->softirqs_enabled = 1;
3324         curr->softirq_enable_ip = ip;
3325         curr->softirq_enable_event = ++curr->irq_events;
3326         debug_atomic_inc(softirqs_on_events);
3327         /*
3328          * We are going to turn softirqs on, so set the
3329          * usage bit for all held locks, if hardirqs are
3330          * enabled too:
3331          */
3332         if (curr->hardirqs_enabled)
3333                 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
3334         current->lockdep_recursion = 0;
3335 }
3336
3337 /*
3338  * Softirqs were disabled:
3339  */
3340 void trace_softirqs_off(unsigned long ip)
3341 {
3342         struct task_struct *curr = current;
3343
3344         if (unlikely(!debug_locks || current->lockdep_recursion))
3345                 return;
3346
3347         /*
3348          * We fancy IRQs being disabled here, see softirq.c
3349          */
3350         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3351                 return;
3352
3353         if (curr->softirqs_enabled) {
3354                 /*
3355                  * We have done an ON -> OFF transition:
3356                  */
3357                 curr->softirqs_enabled = 0;
3358                 curr->softirq_disable_ip = ip;
3359                 curr->softirq_disable_event = ++curr->irq_events;
3360                 debug_atomic_inc(softirqs_off_events);
3361                 /*
3362                  * Whoops, we wanted softirqs off, so why aren't they?
3363                  */
3364                 DEBUG_LOCKS_WARN_ON(!softirq_count());
3365         } else
3366                 debug_atomic_inc(redundant_softirqs_off);
3367 }
3368
3369 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3370 {
3371         /*
3372          * If non-trylock use in a hardirq or softirq context, then
3373          * mark the lock as used in these contexts:
3374          */
3375         if (!hlock->trylock) {
3376                 if (hlock->read) {
3377                         if (curr->hardirq_context)
3378                                 if (!mark_lock(curr, hlock,
3379                                                 LOCK_USED_IN_HARDIRQ_READ))
3380                                         return 0;
3381                         if (curr->softirq_context)
3382                                 if (!mark_lock(curr, hlock,
3383                                                 LOCK_USED_IN_SOFTIRQ_READ))
3384                                         return 0;
3385                 } else {
3386                         if (curr->hardirq_context)
3387                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3388                                         return 0;
3389                         if (curr->softirq_context)
3390                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3391                                         return 0;
3392                 }
3393         }
3394         if (!hlock->hardirqs_off) {
3395                 if (hlock->read) {
3396                         if (!mark_lock(curr, hlock,
3397                                         LOCK_ENABLED_HARDIRQ_READ))
3398                                 return 0;
3399                         if (curr->softirqs_enabled)
3400                                 if (!mark_lock(curr, hlock,
3401                                                 LOCK_ENABLED_SOFTIRQ_READ))
3402                                         return 0;
3403                 } else {
3404                         if (!mark_lock(curr, hlock,
3405                                         LOCK_ENABLED_HARDIRQ))
3406                                 return 0;
3407                         if (curr->softirqs_enabled)
3408                                 if (!mark_lock(curr, hlock,
3409                                                 LOCK_ENABLED_SOFTIRQ))
3410                                         return 0;
3411                 }
3412         }
3413
3414         return 1;
3415 }
3416
3417 static inline unsigned int task_irq_context(struct task_struct *task)
3418 {
3419         return 2 * !!task->hardirq_context + !!task->softirq_context;
3420 }
3421
3422 static int separate_irq_context(struct task_struct *curr,
3423                 struct held_lock *hlock)
3424 {
3425         unsigned int depth = curr->lockdep_depth;
3426
3427         /*
3428          * Keep track of points where we cross into an interrupt context:
3429          */
3430         if (depth) {
3431                 struct held_lock *prev_hlock;
3432
3433                 prev_hlock = curr->held_locks + depth-1;
3434                 /*
3435                  * If we cross into another context, reset the
3436                  * hash key (this also prevents the checking and the
3437                  * adding of the dependency to 'prev'):
3438                  */
3439                 if (prev_hlock->irq_context != hlock->irq_context)
3440                         return 1;
3441         }
3442         return 0;
3443 }
3444
3445 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3446
3447 static inline
3448 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3449                 enum lock_usage_bit new_bit)
3450 {
3451         WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
3452         return 1;
3453 }
3454
3455 static inline int mark_irqflags(struct task_struct *curr,
3456                 struct held_lock *hlock)
3457 {
3458         return 1;
3459 }
3460
3461 static inline unsigned int task_irq_context(struct task_struct *task)
3462 {
3463         return 0;
3464 }
3465
3466 static inline int separate_irq_context(struct task_struct *curr,
3467                 struct held_lock *hlock)
3468 {
3469         return 0;
3470 }
3471
3472 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3473
3474 /*
3475  * Mark a lock with a usage bit, and validate the state transition:
3476  */
3477 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3478                              enum lock_usage_bit new_bit)
3479 {
3480         unsigned int new_mask = 1 << new_bit, ret = 1;
3481
3482         /*
3483          * If already set then do not dirty the cacheline,
3484          * nor do any checks:
3485          */
3486         if (likely(hlock_class(this)->usage_mask & new_mask))
3487                 return 1;
3488
3489         if (!graph_lock())
3490                 return 0;
3491         /*
3492          * Make sure we didn't race:
3493          */
3494         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3495                 graph_unlock();
3496                 return 1;
3497         }
3498
3499         hlock_class(this)->usage_mask |= new_mask;
3500
3501         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3502                 return 0;
3503
3504         switch (new_bit) {
3505 #define LOCKDEP_STATE(__STATE)                  \
3506         case LOCK_USED_IN_##__STATE:            \
3507         case LOCK_USED_IN_##__STATE##_READ:     \
3508         case LOCK_ENABLED_##__STATE:            \
3509         case LOCK_ENABLED_##__STATE##_READ:
3510 #include "lockdep_states.h"
3511 #undef LOCKDEP_STATE
3512                 ret = mark_lock_irq(curr, this, new_bit);
3513                 if (!ret)
3514                         return 0;
3515                 break;
3516         case LOCK_USED:
3517                 debug_atomic_dec(nr_unused_locks);
3518                 break;
3519         default:
3520                 if (!debug_locks_off_graph_unlock())
3521                         return 0;
3522                 WARN_ON(1);
3523                 return 0;
3524         }
3525
3526         graph_unlock();
3527
3528         /*
3529          * We must printk outside of the graph_lock:
3530          */
3531         if (ret == 2) {
3532                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3533                 print_lock(this);
3534                 print_irqtrace_events(curr);
3535                 dump_stack();
3536         }
3537
3538         return ret;
3539 }
3540
3541 /*
3542  * Initialize a lock instance's lock-class mapping info:
3543  */
3544 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3545                       struct lock_class_key *key, int subclass)
3546 {
3547         int i;
3548
3549         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3550                 lock->class_cache[i] = NULL;
3551
3552 #ifdef CONFIG_LOCK_STAT
3553         lock->cpu = raw_smp_processor_id();
3554 #endif
3555
3556         /*
3557          * Can't be having no nameless bastards around this place!
3558          */
3559         if (DEBUG_LOCKS_WARN_ON(!name)) {
3560                 lock->name = "NULL";
3561                 return;
3562         }
3563
3564         lock->name = name;
3565
3566         /*
3567          * No key, no joy, we need to hash something.
3568          */
3569         if (DEBUG_LOCKS_WARN_ON(!key))
3570                 return;
3571         /*
3572          * Sanity check, the lock-class key must either have been allocated
3573          * statically or must have been registered as a dynamic key.
3574          */
3575         if (!static_obj(key) && !is_dynamic_key(key)) {
3576                 if (debug_locks)
3577                         printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
3578                 DEBUG_LOCKS_WARN_ON(1);
3579                 return;
3580         }
3581         lock->key = key;
3582
3583         if (unlikely(!debug_locks))
3584                 return;
3585
3586         if (subclass) {
3587                 unsigned long flags;
3588
3589                 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3590                         return;
3591
3592                 raw_local_irq_save(flags);
3593                 current->lockdep_recursion = 1;
3594                 register_lock_class(lock, subclass, 1);
3595                 current->lockdep_recursion = 0;
3596                 raw_local_irq_restore(flags);
3597         }
3598 }
3599 EXPORT_SYMBOL_GPL(lockdep_init_map);
3600
3601 struct lock_class_key __lockdep_no_validate__;
3602 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3603
3604 static int
3605 print_lock_nested_lock_not_held(struct task_struct *curr,
3606                                 struct held_lock *hlock,
3607                                 unsigned long ip)
3608 {
3609         if (!debug_locks_off())
3610                 return 0;
3611         if (debug_locks_silent)
3612                 return 0;
3613
3614         pr_warn("\n");
3615         pr_warn("==================================\n");
3616         pr_warn("WARNING: Nested lock was not taken\n");
3617         print_kernel_ident();
3618         pr_warn("----------------------------------\n");
3619
3620         pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3621         print_lock(hlock);
3622
3623         pr_warn("\nbut this task is not holding:\n");
3624         pr_warn("%s\n", hlock->nest_lock->name);
3625
3626         pr_warn("\nstack backtrace:\n");
3627         dump_stack();
3628
3629         pr_warn("\nother info that might help us debug this:\n");
3630         lockdep_print_held_locks(curr);
3631
3632         pr_warn("\nstack backtrace:\n");
3633         dump_stack();
3634
3635         return 0;
3636 }
3637
3638 static int __lock_is_held(const struct lockdep_map *lock, int read);
3639
3640 /*
3641  * This gets called for every mutex_lock*()/spin_lock*() operation.
3642  * We maintain the dependency maps and validate the locking attempt:
3643  *
3644  * The callers must make sure that IRQs are disabled before calling it,
3645  * otherwise we could get an interrupt which would want to take locks,
3646  * which would end up in lockdep again.
3647  */
3648 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3649                           int trylock, int read, int check, int hardirqs_off,
3650                           struct lockdep_map *nest_lock, unsigned long ip,
3651                           int references, int pin_count)
3652 {
3653         struct task_struct *curr = current;
3654         struct lock_class *class = NULL;
3655         struct held_lock *hlock;
3656         unsigned int depth;
3657         int chain_head = 0;
3658         int class_idx;
3659         u64 chain_key;
3660
3661         if (unlikely(!debug_locks))
3662                 return 0;
3663
3664         if (!prove_locking || lock->key == &__lockdep_no_validate__)
3665                 check = 0;
3666
3667         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3668                 class = lock->class_cache[subclass];
3669         /*
3670          * Not cached?
3671          */
3672         if (unlikely(!class)) {
3673                 class = register_lock_class(lock, subclass, 0);
3674                 if (!class)
3675                         return 0;
3676         }
3677
3678         debug_class_ops_inc(class);
3679
3680         if (very_verbose(class)) {
3681                 printk("\nacquire class [%px] %s", class->key, class->name);
3682                 if (class->name_version > 1)
3683                         printk(KERN_CONT "#%d", class->name_version);
3684                 printk(KERN_CONT "\n");
3685                 dump_stack();
3686         }
3687
3688         /*
3689          * Add the lock to the list of currently held locks.
3690          * (we dont increase the depth just yet, up until the
3691          * dependency checks are done)
3692          */
3693         depth = curr->lockdep_depth;
3694         /*
3695          * Ran out of static storage for our per-task lock stack again have we?
3696          */
3697         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3698                 return 0;
3699
3700         class_idx = class - lock_classes + 1;
3701
3702         if (depth) {
3703                 hlock = curr->held_locks + depth - 1;
3704                 if (hlock->class_idx == class_idx && nest_lock) {
3705                         if (hlock->references) {
3706                                 /*
3707                                  * Check: unsigned int references:12, overflow.
3708                                  */
3709                                 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3710                                         return 0;
3711
3712                                 hlock->references++;
3713                         } else {
3714                                 hlock->references = 2;
3715                         }
3716
3717                         return 1;
3718                 }
3719         }
3720
3721         hlock = curr->held_locks + depth;
3722         /*
3723          * Plain impossible, we just registered it and checked it weren't no
3724          * NULL like.. I bet this mushroom I ate was good!
3725          */
3726         if (DEBUG_LOCKS_WARN_ON(!class))
3727                 return 0;
3728         hlock->class_idx = class_idx;
3729         hlock->acquire_ip = ip;
3730         hlock->instance = lock;
3731         hlock->nest_lock = nest_lock;
3732         hlock->irq_context = task_irq_context(curr);
3733         hlock->trylock = trylock;
3734         hlock->read = read;
3735         hlock->check = check;
3736         hlock->hardirqs_off = !!hardirqs_off;
3737         hlock->references = references;
3738 #ifdef CONFIG_LOCK_STAT
3739         hlock->waittime_stamp = 0;
3740         hlock->holdtime_stamp = lockstat_clock();
3741 #endif
3742         hlock->pin_count = pin_count;
3743
3744         if (check && !mark_irqflags(curr, hlock))
3745                 return 0;
3746
3747         /* mark it as used: */
3748         if (!mark_lock(curr, hlock, LOCK_USED))
3749                 return 0;
3750
3751         /*
3752          * Calculate the chain hash: it's the combined hash of all the
3753          * lock keys along the dependency chain. We save the hash value
3754          * at every step so that we can get the current hash easily
3755          * after unlock. The chain hash is then used to cache dependency
3756          * results.
3757          *
3758          * The 'key ID' is what is the most compact key value to drive
3759          * the hash, not class->key.
3760          */
3761         /*
3762          * Whoops, we did it again.. ran straight out of our static allocation.
3763          */
3764         if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3765                 return 0;
3766
3767         chain_key = curr->curr_chain_key;
3768         if (!depth) {
3769                 /*
3770                  * How can we have a chain hash when we ain't got no keys?!
3771                  */
3772                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3773                         return 0;
3774                 chain_head = 1;
3775         }
3776
3777         hlock->prev_chain_key = chain_key;
3778         if (separate_irq_context(curr, hlock)) {
3779                 chain_key = 0;
3780                 chain_head = 1;
3781         }
3782         chain_key = iterate_chain_key(chain_key, class_idx);
3783
3784         if (nest_lock && !__lock_is_held(nest_lock, -1))
3785                 return print_lock_nested_lock_not_held(curr, hlock, ip);
3786
3787         if (!debug_locks_silent) {
3788                 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3789                 WARN_ON_ONCE(!hlock_class(hlock)->key);
3790         }
3791
3792         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3793                 return 0;
3794
3795         curr->curr_chain_key = chain_key;
3796         curr->lockdep_depth++;
3797         check_chain_key(curr);
3798 #ifdef CONFIG_DEBUG_LOCKDEP
3799         if (unlikely(!debug_locks))
3800                 return 0;
3801 #endif
3802         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3803                 debug_locks_off();
3804                 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3805                 printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3806                        curr->lockdep_depth, MAX_LOCK_DEPTH);
3807
3808                 lockdep_print_held_locks(current);
3809                 debug_show_all_locks();
3810                 dump_stack();
3811
3812                 return 0;
3813         }
3814
3815         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3816                 max_lockdep_depth = curr->lockdep_depth;
3817
3818         return 1;
3819 }
3820
3821 static int
3822 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3823                            unsigned long ip)
3824 {
3825         if (!debug_locks_off())
3826                 return 0;
3827         if (debug_locks_silent)
3828                 return 0;
3829
3830         pr_warn("\n");
3831         pr_warn("=====================================\n");
3832         pr_warn("WARNING: bad unlock balance detected!\n");
3833         print_kernel_ident();
3834         pr_warn("-------------------------------------\n");
3835         pr_warn("%s/%d is trying to release lock (",
3836                 curr->comm, task_pid_nr(curr));
3837         print_lockdep_cache(lock);
3838         pr_cont(") at:\n");
3839         print_ip_sym(ip);
3840         pr_warn("but there are no more locks to release!\n");
3841         pr_warn("\nother info that might help us debug this:\n");
3842         lockdep_print_held_locks(curr);
3843
3844         pr_warn("\nstack backtrace:\n");
3845         dump_stack();
3846
3847         return 0;
3848 }
3849
3850 static int match_held_lock(const struct held_lock *hlock,
3851                                         const struct lockdep_map *lock)
3852 {
3853         if (hlock->instance == lock)
3854                 return 1;
3855
3856         if (hlock->references) {
3857                 const struct lock_class *class = lock->class_cache[0];
3858
3859                 if (!class)
3860                         class = look_up_lock_class(lock, 0);
3861
3862                 /*
3863                  * If look_up_lock_class() failed to find a class, we're trying
3864                  * to test if we hold a lock that has never yet been acquired.
3865                  * Clearly if the lock hasn't been acquired _ever_, we're not
3866                  * holding it either, so report failure.
3867                  */
3868                 if (!class)
3869                         return 0;
3870
3871                 /*
3872                  * References, but not a lock we're actually ref-counting?
3873                  * State got messed up, follow the sites that change ->references
3874                  * and try to make sense of it.
3875                  */
3876                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3877                         return 0;
3878
3879                 if (hlock->class_idx == class - lock_classes + 1)
3880                         return 1;
3881         }
3882
3883         return 0;
3884 }
3885
3886 /* @depth must not be zero */
3887 static struct held_lock *find_held_lock(struct task_struct *curr,
3888                                         struct lockdep_map *lock,
3889                                         unsigned int depth, int *idx)
3890 {
3891         struct held_lock *ret, *hlock, *prev_hlock;
3892         int i;
3893
3894         i = depth - 1;
3895         hlock = curr->held_locks + i;
3896         ret = hlock;
3897         if (match_held_lock(hlock, lock))
3898                 goto out;
3899
3900         ret = NULL;
3901         for (i--, prev_hlock = hlock--;
3902              i >= 0;
3903              i--, prev_hlock = hlock--) {
3904                 /*
3905                  * We must not cross into another context:
3906                  */
3907                 if (prev_hlock->irq_context != hlock->irq_context) {
3908                         ret = NULL;
3909                         break;
3910                 }
3911                 if (match_held_lock(hlock, lock)) {
3912                         ret = hlock;
3913                         break;
3914                 }
3915         }
3916
3917 out:
3918         *idx = i;
3919         return ret;
3920 }
3921
3922 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3923                               int idx)
3924 {
3925         struct held_lock *hlock;
3926
3927         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3928                 return 0;
3929
3930         for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3931                 if (!__lock_acquire(hlock->instance,
3932                                     hlock_class(hlock)->subclass,
3933                                     hlock->trylock,
3934                                     hlock->read, hlock->check,
3935                                     hlock->hardirqs_off,
3936                                     hlock->nest_lock, hlock->acquire_ip,
3937                                     hlock->references, hlock->pin_count))
3938                         return 1;
3939         }
3940         return 0;
3941 }
3942
3943 static int
3944 __lock_set_class(struct lockdep_map *lock, const char *name,
3945                  struct lock_class_key *key, unsigned int subclass,
3946                  unsigned long ip)
3947 {
3948         struct task_struct *curr = current;
3949         struct held_lock *hlock;
3950         struct lock_class *class;
3951         unsigned int depth;
3952         int i;
3953
3954         if (unlikely(!debug_locks))
3955                 return 0;
3956
3957         depth = curr->lockdep_depth;
3958         /*
3959          * This function is about (re)setting the class of a held lock,
3960          * yet we're not actually holding any locks. Naughty user!
3961          */
3962         if (DEBUG_LOCKS_WARN_ON(!depth))
3963                 return 0;
3964
3965         hlock = find_held_lock(curr, lock, depth, &i);
3966         if (!hlock)
3967                 return print_unlock_imbalance_bug(curr, lock, ip);
3968
3969         lockdep_init_map(lock, name, key, 0);
3970         class = register_lock_class(lock, subclass, 0);
3971         hlock->class_idx = class - lock_classes + 1;
3972
3973         curr->lockdep_depth = i;
3974         curr->curr_chain_key = hlock->prev_chain_key;
3975
3976         if (reacquire_held_locks(curr, depth, i))
3977                 return 0;
3978
3979         /*
3980          * I took it apart and put it back together again, except now I have
3981          * these 'spare' parts.. where shall I put them.
3982          */
3983         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3984                 return 0;
3985         return 1;
3986 }
3987
3988 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3989 {
3990         struct task_struct *curr = current;
3991         struct held_lock *hlock;
3992         unsigned int depth;
3993         int i;
3994
3995         if (unlikely(!debug_locks))
3996                 return 0;
3997
3998         depth = curr->lockdep_depth;
3999         /*
4000          * This function is about (re)setting the class of a held lock,
4001          * yet we're not actually holding any locks. Naughty user!
4002          */
4003         if (DEBUG_LOCKS_WARN_ON(!depth))
4004                 return 0;
4005
4006         hlock = find_held_lock(curr, lock, depth, &i);
4007         if (!hlock)
4008                 return print_unlock_imbalance_bug(curr, lock, ip);
4009
4010         curr->lockdep_depth = i;
4011         curr->curr_chain_key = hlock->prev_chain_key;
4012
4013         WARN(hlock->read, "downgrading a read lock");
4014         hlock->read = 1;
4015         hlock->acquire_ip = ip;
4016
4017         if (reacquire_held_locks(curr, depth, i))
4018                 return 0;
4019
4020         /*
4021          * I took it apart and put it back together again, except now I have
4022          * these 'spare' parts.. where shall I put them.
4023          */
4024         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
4025                 return 0;
4026         return 1;
4027 }
4028
4029 /*
4030  * Remove the lock to the list of currently held locks - this gets
4031  * called on mutex_unlock()/spin_unlock*() (or on a failed
4032  * mutex_lock_interruptible()).
4033  *
4034  * @nested is an hysterical artifact, needs a tree wide cleanup.
4035  */
4036 static int
4037 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
4038 {
4039         struct task_struct *curr = current;
4040         struct held_lock *hlock;
4041         unsigned int depth;
4042         int i;
4043
4044         if (unlikely(!debug_locks))
4045                 return 0;
4046
4047         depth = curr->lockdep_depth;
4048         /*
4049          * So we're all set to release this lock.. wait what lock? We don't
4050          * own any locks, you've been drinking again?
4051          */
4052         if (DEBUG_LOCKS_WARN_ON(depth <= 0))
4053                  return print_unlock_imbalance_bug(curr, lock, ip);
4054
4055         /*
4056          * Check whether the lock exists in the current stack
4057          * of held locks:
4058          */
4059         hlock = find_held_lock(curr, lock, depth, &i);
4060         if (!hlock)
4061                 return print_unlock_imbalance_bug(curr, lock, ip);
4062
4063         if (hlock->instance == lock)
4064                 lock_release_holdtime(hlock);
4065
4066         WARN(hlock->pin_count, "releasing a pinned lock\n");
4067
4068         if (hlock->references) {
4069                 hlock->references--;
4070                 if (hlock->references) {
4071                         /*
4072                          * We had, and after removing one, still have
4073                          * references, the current lock stack is still
4074                          * valid. We're done!
4075                          */
4076                         return 1;
4077                 }
4078         }
4079
4080         /*
4081          * We have the right lock to unlock, 'hlock' points to it.
4082          * Now we remove it from the stack, and add back the other
4083          * entries (if any), recalculating the hash along the way:
4084          */
4085
4086         curr->lockdep_depth = i;
4087         curr->curr_chain_key = hlock->prev_chain_key;
4088
4089         /*
4090          * The most likely case is when the unlock is on the innermost
4091          * lock. In this case, we are done!
4092          */
4093         if (i == depth-1)
4094                 return 1;
4095
4096         if (reacquire_held_locks(curr, depth, i + 1))
4097                 return 0;
4098
4099         /*
4100          * We had N bottles of beer on the wall, we drank one, but now
4101          * there's not N-1 bottles of beer left on the wall...
4102          */
4103         DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
4104
4105         /*
4106          * Since reacquire_held_locks() would have called check_chain_key()
4107          * indirectly via __lock_acquire(), we don't need to do it again
4108          * on return.
4109          */
4110         return 0;
4111 }
4112
4113 static nokprobe_inline
4114 int __lock_is_held(const struct lockdep_map *lock, int read)
4115 {
4116         struct task_struct *curr = current;
4117         int i;
4118
4119         for (i = 0; i < curr->lockdep_depth; i++) {
4120                 struct held_lock *hlock = curr->held_locks + i;
4121
4122                 if (match_held_lock(hlock, lock)) {
4123                         if (read == -1 || hlock->read == read)
4124                                 return 1;
4125
4126                         return 0;
4127                 }
4128         }
4129
4130         return 0;
4131 }
4132
4133 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
4134 {
4135         struct pin_cookie cookie = NIL_COOKIE;
4136         struct task_struct *curr = current;
4137         int i;
4138
4139         if (unlikely(!debug_locks))
4140                 return cookie;
4141
4142         for (i = 0; i < curr->lockdep_depth; i++) {
4143                 struct held_lock *hlock = curr->held_locks + i;
4144
4145                 if (match_held_lock(hlock, lock)) {
4146                         /*
4147                          * Grab 16bits of randomness; this is sufficient to not
4148                          * be guessable and still allows some pin nesting in
4149                          * our u32 pin_count.
4150                          */
4151                         cookie.val = 1 + (prandom_u32() >> 16);
4152                         hlock->pin_count += cookie.val;
4153                         return cookie;
4154                 }
4155         }
4156
4157         WARN(1, "pinning an unheld lock\n");
4158         return cookie;
4159 }
4160
4161 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4162 {
4163         struct task_struct *curr = current;
4164         int i;
4165
4166         if (unlikely(!debug_locks))
4167                 return;
4168
4169         for (i = 0; i < curr->lockdep_depth; i++) {
4170                 struct held_lock *hlock = curr->held_locks + i;
4171
4172                 if (match_held_lock(hlock, lock)) {
4173                         hlock->pin_count += cookie.val;
4174                         return;
4175                 }
4176         }
4177
4178         WARN(1, "pinning an unheld lock\n");
4179 }
4180
4181 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4182 {
4183         struct task_struct *curr = current;
4184         int i;
4185
4186         if (unlikely(!debug_locks))
4187                 return;
4188
4189         for (i = 0; i < curr->lockdep_depth; i++) {
4190                 struct held_lock *hlock = curr->held_locks + i;
4191
4192                 if (match_held_lock(hlock, lock)) {
4193                         if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4194                                 return;
4195
4196                         hlock->pin_count -= cookie.val;
4197
4198                         if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4199                                 hlock->pin_count = 0;
4200
4201                         return;
4202                 }
4203         }
4204
4205         WARN(1, "unpinning an unheld lock\n");
4206 }
4207
4208 /*
4209  * Check whether we follow the irq-flags state precisely:
4210  */
4211 static void check_flags(unsigned long flags)
4212 {
4213 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
4214     defined(CONFIG_TRACE_IRQFLAGS)
4215         if (!debug_locks)
4216                 return;
4217
4218         if (irqs_disabled_flags(flags)) {
4219                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4220                         printk("possible reason: unannotated irqs-off.\n");
4221                 }
4222         } else {
4223                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4224                         printk("possible reason: unannotated irqs-on.\n");
4225                 }
4226         }
4227
4228         /*
4229          * We dont accurately track softirq state in e.g.
4230          * hardirq contexts (such as on 4KSTACKS), so only
4231          * check if not in hardirq contexts:
4232          */
4233         if (!hardirq_count()) {
4234                 if (softirq_count()) {
4235                         /* like the above, but with softirqs */
4236                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
4237                 } else {
4238                         /* lick the above, does it taste good? */
4239                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
4240                 }
4241         }
4242
4243         if (!debug_locks)
4244                 print_irqtrace_events(current);
4245 #endif
4246 }
4247
4248 void lock_set_class(struct lockdep_map *lock, const char *name,
4249                     struct lock_class_key *key, unsigned int subclass,
4250                     unsigned long ip)
4251 {
4252         unsigned long flags;
4253
4254         if (unlikely(current->lockdep_recursion))
4255                 return;
4256
4257         raw_local_irq_save(flags);
4258         current->lockdep_recursion = 1;
4259         check_flags(flags);
4260         if (__lock_set_class(lock, name, key, subclass, ip))
4261                 check_chain_key(current);
4262         current->lockdep_recursion = 0;
4263         raw_local_irq_restore(flags);
4264 }
4265 EXPORT_SYMBOL_GPL(lock_set_class);
4266
4267 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4268 {
4269         unsigned long flags;
4270
4271         if (unlikely(current->lockdep_recursion))
4272                 return;
4273
4274         raw_local_irq_save(flags);
4275         current->lockdep_recursion = 1;
4276         check_flags(flags);
4277         if (__lock_downgrade(lock, ip))
4278                 check_chain_key(current);
4279         current->lockdep_recursion = 0;
4280         raw_local_irq_restore(flags);
4281 }
4282 EXPORT_SYMBOL_GPL(lock_downgrade);
4283
4284 /*
4285  * We are not always called with irqs disabled - do that here,
4286  * and also avoid lockdep recursion:
4287  */
4288 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4289                           int trylock, int read, int check,
4290                           struct lockdep_map *nest_lock, unsigned long ip)
4291 {
4292         unsigned long flags;
4293
4294         if (unlikely(current->lockdep_recursion))
4295                 return;
4296
4297         raw_local_irq_save(flags);
4298         check_flags(flags);
4299
4300         current->lockdep_recursion = 1;
4301         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
4302         __lock_acquire(lock, subclass, trylock, read, check,
4303                        irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
4304         current->lockdep_recursion = 0;
4305         raw_local_irq_restore(flags);
4306 }
4307 EXPORT_SYMBOL_GPL(lock_acquire);
4308
4309 void lock_release(struct lockdep_map *lock, int nested,
4310                           unsigned long ip)
4311 {
4312         unsigned long flags;
4313
4314         if (unlikely(current->lockdep_recursion))
4315                 return;
4316
4317         raw_local_irq_save(flags);
4318         check_flags(flags);
4319         current->lockdep_recursion = 1;
4320         trace_lock_release(lock, ip);
4321         if (__lock_release(lock, nested, ip))
4322                 check_chain_key(current);
4323         current->lockdep_recursion = 0;
4324         raw_local_irq_restore(flags);
4325 }
4326 EXPORT_SYMBOL_GPL(lock_release);
4327
4328 int lock_is_held_type(const struct lockdep_map *lock, int read)
4329 {
4330         unsigned long flags;
4331         int ret = 0;
4332
4333         if (unlikely(current->lockdep_recursion))
4334                 return 1; /* avoid false negative lockdep_assert_held() */
4335
4336         raw_local_irq_save(flags);
4337         check_flags(flags);
4338
4339         current->lockdep_recursion = 1;
4340         ret = __lock_is_held(lock, read);
4341         current->lockdep_recursion = 0;
4342         raw_local_irq_restore(flags);
4343
4344         return ret;
4345 }
4346 EXPORT_SYMBOL_GPL(lock_is_held_type);
4347 NOKPROBE_SYMBOL(lock_is_held_type);
4348
4349 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
4350 {
4351         struct pin_cookie cookie = NIL_COOKIE;
4352         unsigned long flags;
4353
4354         if (unlikely(current->lockdep_recursion))
4355                 return cookie;
4356
4357         raw_local_irq_save(flags);
4358         check_flags(flags);
4359
4360         current->lockdep_recursion = 1;
4361         cookie = __lock_pin_lock(lock);
4362         current->lockdep_recursion = 0;
4363         raw_local_irq_restore(flags);
4364
4365         return cookie;
4366 }
4367 EXPORT_SYMBOL_GPL(lock_pin_lock);
4368
4369 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4370 {
4371         unsigned long flags;
4372
4373         if (unlikely(current->lockdep_recursion))
4374                 return;
4375
4376         raw_local_irq_save(flags);
4377         check_flags(flags);
4378
4379         current->lockdep_recursion = 1;
4380         __lock_repin_lock(lock, cookie);
4381         current->lockdep_recursion = 0;
4382         raw_local_irq_restore(flags);
4383 }
4384 EXPORT_SYMBOL_GPL(lock_repin_lock);
4385
4386 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4387 {
4388         unsigned long flags;
4389
4390         if (unlikely(current->lockdep_recursion))
4391                 return;
4392
4393         raw_local_irq_save(flags);
4394         check_flags(flags);
4395
4396         current->lockdep_recursion = 1;
4397         __lock_unpin_lock(lock, cookie);
4398         current->lockdep_recursion = 0;
4399         raw_local_irq_restore(flags);
4400 }
4401 EXPORT_SYMBOL_GPL(lock_unpin_lock);
4402
4403 #ifdef CONFIG_LOCK_STAT
4404 static int
4405 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4406                            unsigned long ip)
4407 {
4408         if (!debug_locks_off())
4409                 return 0;
4410         if (debug_locks_silent)
4411                 return 0;
4412
4413         pr_warn("\n");
4414         pr_warn("=================================\n");
4415         pr_warn("WARNING: bad contention detected!\n");
4416         print_kernel_ident();
4417         pr_warn("---------------------------------\n");
4418         pr_warn("%s/%d is trying to contend lock (",
4419                 curr->comm, task_pid_nr(curr));
4420         print_lockdep_cache(lock);
4421         pr_cont(") at:\n");
4422         print_ip_sym(ip);
4423         pr_warn("but there are no locks held!\n");
4424         pr_warn("\nother info that might help us debug this:\n");
4425         lockdep_print_held_locks(curr);
4426
4427         pr_warn("\nstack backtrace:\n");
4428         dump_stack();
4429
4430         return 0;
4431 }
4432
4433 static void
4434 __lock_contended(struct lockdep_map *lock, unsigned long ip)
4435 {
4436         struct task_struct *curr = current;
4437         struct held_lock *hlock;
4438         struct lock_class_stats *stats;
4439         unsigned int depth;
4440         int i, contention_point, contending_point;
4441
4442         depth = curr->lockdep_depth;
4443         /*
4444          * Whee, we contended on this lock, except it seems we're not
4445          * actually trying to acquire anything much at all..
4446          */
4447         if (DEBUG_LOCKS_WARN_ON(!depth))
4448                 return;
4449
4450         hlock = find_held_lock(curr, lock, depth, &i);
4451         if (!hlock) {
4452                 print_lock_contention_bug(curr, lock, ip);
4453                 return;
4454         }
4455
4456         if (hlock->instance != lock)
4457                 return;
4458
4459         hlock->waittime_stamp = lockstat_clock();
4460
4461         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4462         contending_point = lock_point(hlock_class(hlock)->contending_point,
4463                                       lock->ip);
4464
4465         stats = get_lock_stats(hlock_class(hlock));
4466         if (contention_point < LOCKSTAT_POINTS)
4467                 stats->contention_point[contention_point]++;
4468         if (contending_point < LOCKSTAT_POINTS)
4469                 stats->contending_point[contending_point]++;
4470         if (lock->cpu != smp_processor_id())
4471                 stats->bounces[bounce_contended + !!hlock->read]++;
4472 }
4473
4474 static void
4475 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4476 {
4477         struct task_struct *curr = current;
4478         struct held_lock *hlock;
4479         struct lock_class_stats *stats;
4480         unsigned int depth;
4481         u64 now, waittime = 0;
4482         int i, cpu;
4483
4484         depth = curr->lockdep_depth;
4485         /*
4486          * Yay, we acquired ownership of this lock we didn't try to
4487          * acquire, how the heck did that happen?
4488          */
4489         if (DEBUG_LOCKS_WARN_ON(!depth))
4490                 return;
4491
4492         hlock = find_held_lock(curr, lock, depth, &i);
4493         if (!hlock) {
4494                 print_lock_contention_bug(curr, lock, _RET_IP_);
4495                 return;
4496         }
4497
4498         if (hlock->instance != lock)
4499                 return;
4500
4501         cpu = smp_processor_id();
4502         if (hlock->waittime_stamp) {
4503                 now = lockstat_clock();
4504                 waittime = now - hlock->waittime_stamp;
4505                 hlock->holdtime_stamp = now;
4506         }
4507
4508         trace_lock_acquired(lock, ip);
4509
4510         stats = get_lock_stats(hlock_class(hlock));
4511         if (waittime) {
4512                 if (hlock->read)
4513                         lock_time_inc(&stats->read_waittime, waittime);
4514                 else
4515                         lock_time_inc(&stats->write_waittime, waittime);
4516         }
4517         if (lock->cpu != cpu)
4518                 stats->bounces[bounce_acquired + !!hlock->read]++;
4519
4520         lock->cpu = cpu;
4521         lock->ip = ip;
4522 }
4523
4524 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4525 {
4526         unsigned long flags;
4527
4528         if (unlikely(!lock_stat || !debug_locks))
4529                 return;
4530
4531         if (unlikely(current->lockdep_recursion))
4532                 return;
4533
4534         raw_local_irq_save(flags);
4535         check_flags(flags);
4536         current->lockdep_recursion = 1;
4537         trace_lock_contended(lock, ip);
4538         __lock_contended(lock, ip);
4539         current->lockdep_recursion = 0;
4540         raw_local_irq_restore(flags);
4541 }
4542 EXPORT_SYMBOL_GPL(lock_contended);
4543
4544 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4545 {
4546         unsigned long flags;
4547
4548         if (unlikely(!lock_stat || !debug_locks))
4549                 return;
4550
4551         if (unlikely(current->lockdep_recursion))
4552                 return;
4553
4554         raw_local_irq_save(flags);
4555         check_flags(flags);
4556         current->lockdep_recursion = 1;
4557         __lock_acquired(lock, ip);
4558         current->lockdep_recursion = 0;
4559         raw_local_irq_restore(flags);
4560 }
4561 EXPORT_SYMBOL_GPL(lock_acquired);
4562 #endif
4563
4564 /*
4565  * Used by the testsuite, sanitize the validator state
4566  * after a simulated failure:
4567  */
4568
4569 void lockdep_reset(void)
4570 {
4571         unsigned long flags;
4572         int i;
4573
4574         raw_local_irq_save(flags);
4575         current->curr_chain_key = 0;
4576         current->lockdep_depth = 0;
4577         current->lockdep_recursion = 0;
4578         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4579         nr_hardirq_chains = 0;
4580         nr_softirq_chains = 0;
4581         nr_process_chains = 0;
4582         debug_locks = 1;
4583         for (i = 0; i < CHAINHASH_SIZE; i++)
4584                 INIT_HLIST_HEAD(chainhash_table + i);
4585         raw_local_irq_restore(flags);
4586 }
4587
4588 /* Remove a class from a lock chain. Must be called with the graph lock held. */
4589 static void remove_class_from_lock_chain(struct pending_free *pf,
4590                                          struct lock_chain *chain,
4591                                          struct lock_class *class)
4592 {
4593 #ifdef CONFIG_PROVE_LOCKING
4594         struct lock_chain *new_chain;
4595         u64 chain_key;
4596         int i;
4597
4598         for (i = chain->base; i < chain->base + chain->depth; i++) {
4599                 if (chain_hlocks[i] != class - lock_classes)
4600                         continue;
4601                 /* The code below leaks one chain_hlock[] entry. */
4602                 if (--chain->depth > 0) {
4603                         memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4604                                 (chain->base + chain->depth - i) *
4605                                 sizeof(chain_hlocks[0]));
4606                 }
4607                 /*
4608                  * Each lock class occurs at most once in a lock chain so once
4609                  * we found a match we can break out of this loop.
4610                  */
4611                 goto recalc;
4612         }
4613         /* Since the chain has not been modified, return. */
4614         return;
4615
4616 recalc:
4617         chain_key = 0;
4618         for (i = chain->base; i < chain->base + chain->depth; i++)
4619                 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
4620         if (chain->depth && chain->chain_key == chain_key)
4621                 return;
4622         /* Overwrite the chain key for concurrent RCU readers. */
4623         WRITE_ONCE(chain->chain_key, chain_key);
4624         /*
4625          * Note: calling hlist_del_rcu() from inside a
4626          * hlist_for_each_entry_rcu() loop is safe.
4627          */
4628         hlist_del_rcu(&chain->entry);
4629         __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
4630         if (chain->depth == 0)
4631                 return;
4632         /*
4633          * If the modified lock chain matches an existing lock chain, drop
4634          * the modified lock chain.
4635          */
4636         if (lookup_chain_cache(chain_key))
4637                 return;
4638         new_chain = alloc_lock_chain();
4639         if (WARN_ON_ONCE(!new_chain)) {
4640                 debug_locks_off();
4641                 return;
4642         }
4643         *new_chain = *chain;
4644         hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4645 #endif
4646 }
4647
4648 /* Must be called with the graph lock held. */
4649 static void remove_class_from_lock_chains(struct pending_free *pf,
4650                                           struct lock_class *class)
4651 {
4652         struct lock_chain *chain;
4653         struct hlist_head *head;
4654         int i;
4655
4656         for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4657                 head = chainhash_table + i;
4658                 hlist_for_each_entry_rcu(chain, head, entry) {
4659                         remove_class_from_lock_chain(pf, chain, class);
4660                 }
4661         }
4662 }
4663
4664 /*
4665  * Remove all references to a lock class. The caller must hold the graph lock.
4666  */
4667 static void zap_class(struct pending_free *pf, struct lock_class *class)
4668 {
4669         struct lock_list *entry;
4670         int i;
4671
4672         WARN_ON_ONCE(!class->key);
4673
4674         /*
4675          * Remove all dependencies this lock is
4676          * involved in:
4677          */
4678         for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
4679                 entry = list_entries + i;
4680                 if (entry->class != class && entry->links_to != class)
4681                         continue;
4682                 __clear_bit(i, list_entries_in_use);
4683                 nr_list_entries--;
4684                 list_del_rcu(&entry->entry);
4685         }
4686         if (list_empty(&class->locks_after) &&
4687             list_empty(&class->locks_before)) {
4688                 list_move_tail(&class->lock_entry, &pf->zapped);
4689                 hlist_del_rcu(&class->hash_entry);
4690                 WRITE_ONCE(class->key, NULL);
4691                 WRITE_ONCE(class->name, NULL);
4692                 nr_lock_classes--;
4693         } else {
4694                 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
4695                           class->name);
4696         }
4697
4698         remove_class_from_lock_chains(pf, class);
4699 }
4700
4701 static void reinit_class(struct lock_class *class)
4702 {
4703         void *const p = class;
4704         const unsigned int offset = offsetof(struct lock_class, key);
4705
4706         WARN_ON_ONCE(!class->lock_entry.next);
4707         WARN_ON_ONCE(!list_empty(&class->locks_after));
4708         WARN_ON_ONCE(!list_empty(&class->locks_before));
4709         memset(p + offset, 0, sizeof(*class) - offset);
4710         WARN_ON_ONCE(!class->lock_entry.next);
4711         WARN_ON_ONCE(!list_empty(&class->locks_after));
4712         WARN_ON_ONCE(!list_empty(&class->locks_before));
4713 }
4714
4715 static inline int within(const void *addr, void *start, unsigned long size)
4716 {
4717         return addr >= start && addr < start + size;
4718 }
4719
4720 static bool inside_selftest(void)
4721 {
4722         return current == lockdep_selftest_task_struct;
4723 }
4724
4725 /* The caller must hold the graph lock. */
4726 static struct pending_free *get_pending_free(void)
4727 {
4728         return delayed_free.pf + delayed_free.index;
4729 }
4730
4731 static void free_zapped_rcu(struct rcu_head *cb);
4732
4733 /*
4734  * Schedule an RCU callback if no RCU callback is pending. Must be called with
4735  * the graph lock held.
4736  */
4737 static void call_rcu_zapped(struct pending_free *pf)
4738 {
4739         WARN_ON_ONCE(inside_selftest());
4740
4741         if (list_empty(&pf->zapped))
4742                 return;
4743
4744         if (delayed_free.scheduled)
4745                 return;
4746
4747         delayed_free.scheduled = true;
4748
4749         WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
4750         delayed_free.index ^= 1;
4751
4752         call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
4753 }
4754
4755 /* The caller must hold the graph lock. May be called from RCU context. */
4756 static void __free_zapped_classes(struct pending_free *pf)
4757 {
4758         struct lock_class *class;
4759
4760         check_data_structures();
4761
4762         list_for_each_entry(class, &pf->zapped, lock_entry)
4763                 reinit_class(class);
4764
4765         list_splice_init(&pf->zapped, &free_lock_classes);
4766
4767 #ifdef CONFIG_PROVE_LOCKING
4768         bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
4769                       pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
4770         bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
4771 #endif
4772 }
4773
4774 static void free_zapped_rcu(struct rcu_head *ch)
4775 {
4776         struct pending_free *pf;
4777         unsigned long flags;
4778
4779         if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
4780                 return;
4781
4782         raw_local_irq_save(flags);
4783         arch_spin_lock(&lockdep_lock);
4784         current->lockdep_recursion = 1;
4785
4786         /* closed head */
4787         pf = delayed_free.pf + (delayed_free.index ^ 1);
4788         __free_zapped_classes(pf);
4789         delayed_free.scheduled = false;
4790
4791         /*
4792          * If there's anything on the open list, close and start a new callback.
4793          */
4794         call_rcu_zapped(delayed_free.pf + delayed_free.index);
4795
4796         current->lockdep_recursion = 0;
4797         arch_spin_unlock(&lockdep_lock);
4798         raw_local_irq_restore(flags);
4799 }
4800
4801 /*
4802  * Remove all lock classes from the class hash table and from the
4803  * all_lock_classes list whose key or name is in the address range [start,
4804  * start + size). Move these lock classes to the zapped_classes list. Must
4805  * be called with the graph lock held.
4806  */
4807 static void __lockdep_free_key_range(struct pending_free *pf, void *start,
4808                                      unsigned long size)
4809 {
4810         struct lock_class *class;
4811         struct hlist_head *head;
4812         int i;
4813
4814         /* Unhash all classes that were created by a module. */
4815         for (i = 0; i < CLASSHASH_SIZE; i++) {
4816                 head = classhash_table + i;
4817                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4818                         if (!within(class->key, start, size) &&
4819                             !within(class->name, start, size))
4820                                 continue;
4821                         zap_class(pf, class);
4822                 }
4823         }
4824 }
4825
4826 /*
4827  * Used in module.c to remove lock classes from memory that is going to be
4828  * freed; and possibly re-used by other modules.
4829  *
4830  * We will have had one synchronize_rcu() before getting here, so we're
4831  * guaranteed nobody will look up these exact classes -- they're properly dead
4832  * but still allocated.
4833  */
4834 static void lockdep_free_key_range_reg(void *start, unsigned long size)
4835 {
4836         struct pending_free *pf;
4837         unsigned long flags;
4838
4839         init_data_structures_once();
4840
4841         raw_local_irq_save(flags);
4842         arch_spin_lock(&lockdep_lock);
4843         current->lockdep_recursion = 1;
4844         pf = get_pending_free();
4845         __lockdep_free_key_range(pf, start, size);
4846         call_rcu_zapped(pf);
4847         current->lockdep_recursion = 0;
4848         arch_spin_unlock(&lockdep_lock);
4849         raw_local_irq_restore(flags);
4850
4851         /*
4852          * Wait for any possible iterators from look_up_lock_class() to pass
4853          * before continuing to free the memory they refer to.
4854          */
4855         synchronize_rcu();
4856 }
4857
4858 /*
4859  * Free all lockdep keys in the range [start, start+size). Does not sleep.
4860  * Ignores debug_locks. Must only be used by the lockdep selftests.
4861  */
4862 static void lockdep_free_key_range_imm(void *start, unsigned long size)
4863 {
4864         struct pending_free *pf = delayed_free.pf;
4865         unsigned long flags;
4866
4867         init_data_structures_once();
4868
4869         raw_local_irq_save(flags);
4870         arch_spin_lock(&lockdep_lock);
4871         __lockdep_free_key_range(pf, start, size);
4872         __free_zapped_classes(pf);
4873         arch_spin_unlock(&lockdep_lock);
4874         raw_local_irq_restore(flags);
4875 }
4876
4877 void lockdep_free_key_range(void *start, unsigned long size)
4878 {
4879         init_data_structures_once();
4880
4881         if (inside_selftest())
4882                 lockdep_free_key_range_imm(start, size);
4883         else
4884                 lockdep_free_key_range_reg(start, size);
4885 }
4886
4887 /*
4888  * Check whether any element of the @lock->class_cache[] array refers to a
4889  * registered lock class. The caller must hold either the graph lock or the
4890  * RCU read lock.
4891  */
4892 static bool lock_class_cache_is_registered(struct lockdep_map *lock)
4893 {
4894         struct lock_class *class;
4895         struct hlist_head *head;
4896         int i, j;
4897
4898         for (i = 0; i < CLASSHASH_SIZE; i++) {
4899                 head = classhash_table + i;
4900                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4901                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4902                                 if (lock->class_cache[j] == class)
4903                                         return true;
4904                 }
4905         }
4906         return false;
4907 }
4908
4909 /* The caller must hold the graph lock. Does not sleep. */
4910 static void __lockdep_reset_lock(struct pending_free *pf,
4911                                  struct lockdep_map *lock)
4912 {
4913         struct lock_class *class;
4914         int j;
4915
4916         /*
4917          * Remove all classes this lock might have:
4918          */
4919         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4920                 /*
4921                  * If the class exists we look it up and zap it:
4922                  */
4923                 class = look_up_lock_class(lock, j);
4924                 if (class)
4925                         zap_class(pf, class);
4926         }
4927         /*
4928          * Debug check: in the end all mapped classes should
4929          * be gone.
4930          */
4931         if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
4932                 debug_locks_off();
4933 }
4934
4935 /*
4936  * Remove all information lockdep has about a lock if debug_locks == 1. Free
4937  * released data structures from RCU context.
4938  */
4939 static void lockdep_reset_lock_reg(struct lockdep_map *lock)
4940 {
4941         struct pending_free *pf;
4942         unsigned long flags;
4943         int locked;
4944
4945         raw_local_irq_save(flags);
4946         locked = graph_lock();
4947         if (!locked)
4948                 goto out_irq;
4949
4950         pf = get_pending_free();
4951         __lockdep_reset_lock(pf, lock);
4952         call_rcu_zapped(pf);
4953
4954         graph_unlock();
4955 out_irq:
4956         raw_local_irq_restore(flags);
4957 }
4958
4959 /*
4960  * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
4961  * lockdep selftests.
4962  */
4963 static void lockdep_reset_lock_imm(struct lockdep_map *lock)
4964 {
4965         struct pending_free *pf = delayed_free.pf;
4966         unsigned long flags;
4967
4968         raw_local_irq_save(flags);
4969         arch_spin_lock(&lockdep_lock);
4970         __lockdep_reset_lock(pf, lock);
4971         __free_zapped_classes(pf);
4972         arch_spin_unlock(&lockdep_lock);
4973         raw_local_irq_restore(flags);
4974 }
4975
4976 void lockdep_reset_lock(struct lockdep_map *lock)
4977 {
4978         init_data_structures_once();
4979
4980         if (inside_selftest())
4981                 lockdep_reset_lock_imm(lock);
4982         else
4983                 lockdep_reset_lock_reg(lock);
4984 }
4985
4986 /* Unregister a dynamically allocated key. */
4987 void lockdep_unregister_key(struct lock_class_key *key)
4988 {
4989         struct hlist_head *hash_head = keyhashentry(key);
4990         struct lock_class_key *k;
4991         struct pending_free *pf;
4992         unsigned long flags;
4993         bool found = false;
4994
4995         might_sleep();
4996
4997         if (WARN_ON_ONCE(static_obj(key)))
4998                 return;
4999
5000         raw_local_irq_save(flags);
5001         if (!graph_lock())
5002                 goto out_irq;
5003
5004         pf = get_pending_free();
5005         hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
5006                 if (k == key) {
5007                         hlist_del_rcu(&k->hash_entry);
5008                         found = true;
5009                         break;
5010                 }
5011         }
5012         WARN_ON_ONCE(!found);
5013         __lockdep_free_key_range(pf, key, 1);
5014         call_rcu_zapped(pf);
5015         graph_unlock();
5016 out_irq:
5017         raw_local_irq_restore(flags);
5018
5019         /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
5020         synchronize_rcu();
5021 }
5022 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
5023
5024 void __init lockdep_init(void)
5025 {
5026         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
5027
5028         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
5029         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
5030         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
5031         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
5032         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
5033         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
5034         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
5035
5036         printk(" memory used by lock dependency info: %zu kB\n",
5037                (sizeof(lock_classes) +
5038                 sizeof(classhash_table) +
5039                 sizeof(list_entries) +
5040                 sizeof(list_entries_in_use) +
5041                 sizeof(chainhash_table) +
5042                 sizeof(delayed_free)
5043 #ifdef CONFIG_PROVE_LOCKING
5044                 + sizeof(lock_cq)
5045                 + sizeof(lock_chains)
5046                 + sizeof(lock_chains_in_use)
5047                 + sizeof(chain_hlocks)
5048 #endif
5049                 ) / 1024
5050                 );
5051
5052         printk(" per task-struct memory footprint: %zu bytes\n",
5053                sizeof(((struct task_struct *)NULL)->held_locks));
5054 }
5055
5056 static void
5057 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
5058                      const void *mem_to, struct held_lock *hlock)
5059 {
5060         if (!debug_locks_off())
5061                 return;
5062         if (debug_locks_silent)
5063                 return;
5064
5065         pr_warn("\n");
5066         pr_warn("=========================\n");
5067         pr_warn("WARNING: held lock freed!\n");
5068         print_kernel_ident();
5069         pr_warn("-------------------------\n");
5070         pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
5071                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
5072         print_lock(hlock);
5073         lockdep_print_held_locks(curr);
5074
5075         pr_warn("\nstack backtrace:\n");
5076         dump_stack();
5077 }
5078
5079 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
5080                                 const void* lock_from, unsigned long lock_len)
5081 {
5082         return lock_from + lock_len <= mem_from ||
5083                 mem_from + mem_len <= lock_from;
5084 }
5085
5086 /*
5087  * Called when kernel memory is freed (or unmapped), or if a lock
5088  * is destroyed or reinitialized - this code checks whether there is
5089  * any held lock in the memory range of <from> to <to>:
5090  */
5091 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
5092 {
5093         struct task_struct *curr = current;
5094         struct held_lock *hlock;
5095         unsigned long flags;
5096         int i;
5097
5098         if (unlikely(!debug_locks))
5099                 return;
5100
5101         raw_local_irq_save(flags);
5102         for (i = 0; i < curr->lockdep_depth; i++) {
5103                 hlock = curr->held_locks + i;
5104
5105                 if (not_in_range(mem_from, mem_len, hlock->instance,
5106                                         sizeof(*hlock->instance)))
5107                         continue;
5108
5109                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
5110                 break;
5111         }
5112         raw_local_irq_restore(flags);
5113 }
5114 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
5115
5116 static void print_held_locks_bug(void)
5117 {
5118         if (!debug_locks_off())
5119                 return;
5120         if (debug_locks_silent)
5121                 return;
5122
5123         pr_warn("\n");
5124         pr_warn("====================================\n");
5125         pr_warn("WARNING: %s/%d still has locks held!\n",
5126                current->comm, task_pid_nr(current));
5127         print_kernel_ident();
5128         pr_warn("------------------------------------\n");
5129         lockdep_print_held_locks(current);
5130         pr_warn("\nstack backtrace:\n");
5131         dump_stack();
5132 }
5133
5134 void debug_check_no_locks_held(void)
5135 {
5136         if (unlikely(current->lockdep_depth > 0))
5137                 print_held_locks_bug();
5138 }
5139 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
5140
5141 #ifdef __KERNEL__
5142 void debug_show_all_locks(void)
5143 {
5144         struct task_struct *g, *p;
5145
5146         if (unlikely(!debug_locks)) {
5147                 pr_warn("INFO: lockdep is turned off.\n");
5148                 return;
5149         }
5150         pr_warn("\nShowing all locks held in the system:\n");
5151
5152         rcu_read_lock();
5153         for_each_process_thread(g, p) {
5154                 if (!p->lockdep_depth)
5155                         continue;
5156                 lockdep_print_held_locks(p);
5157                 touch_nmi_watchdog();
5158                 touch_all_softlockup_watchdogs();
5159         }
5160         rcu_read_unlock();
5161
5162         pr_warn("\n");
5163         pr_warn("=============================================\n\n");
5164 }
5165 EXPORT_SYMBOL_GPL(debug_show_all_locks);
5166 #endif
5167
5168 /*
5169  * Careful: only use this function if you are sure that
5170  * the task cannot run in parallel!
5171  */
5172 void debug_show_held_locks(struct task_struct *task)
5173 {
5174         if (unlikely(!debug_locks)) {
5175                 printk("INFO: lockdep is turned off.\n");
5176                 return;
5177         }
5178         lockdep_print_held_locks(task);
5179 }
5180 EXPORT_SYMBOL_GPL(debug_show_held_locks);
5181
5182 asmlinkage __visible void lockdep_sys_exit(void)
5183 {
5184         struct task_struct *curr = current;
5185
5186         if (unlikely(curr->lockdep_depth)) {
5187                 if (!debug_locks_off())
5188                         return;
5189                 pr_warn("\n");
5190                 pr_warn("================================================\n");
5191                 pr_warn("WARNING: lock held when returning to user space!\n");
5192                 print_kernel_ident();
5193                 pr_warn("------------------------------------------------\n");
5194                 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
5195                                 curr->comm, curr->pid);
5196                 lockdep_print_held_locks(curr);
5197         }
5198
5199         /*
5200          * The lock history for each syscall should be independent. So wipe the
5201          * slate clean on return to userspace.
5202          */
5203         lockdep_invariant_state(false);
5204 }
5205
5206 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
5207 {
5208         struct task_struct *curr = current;
5209
5210         /* Note: the following can be executed concurrently, so be careful. */
5211         pr_warn("\n");
5212         pr_warn("=============================\n");
5213         pr_warn("WARNING: suspicious RCU usage\n");
5214         print_kernel_ident();
5215         pr_warn("-----------------------------\n");
5216         pr_warn("%s:%d %s!\n", file, line, s);
5217         pr_warn("\nother info that might help us debug this:\n\n");
5218         pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5219                !rcu_lockdep_current_cpu_online()
5220                         ? "RCU used illegally from offline CPU!\n"
5221                         : !rcu_is_watching()
5222                                 ? "RCU used illegally from idle CPU!\n"
5223                                 : "",
5224                rcu_scheduler_active, debug_locks);
5225
5226         /*
5227          * If a CPU is in the RCU-free window in idle (ie: in the section
5228          * between rcu_idle_enter() and rcu_idle_exit(), then RCU
5229          * considers that CPU to be in an "extended quiescent state",
5230          * which means that RCU will be completely ignoring that CPU.
5231          * Therefore, rcu_read_lock() and friends have absolutely no
5232          * effect on a CPU running in that state. In other words, even if
5233          * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
5234          * delete data structures out from under it.  RCU really has no
5235          * choice here: we need to keep an RCU-free window in idle where
5236          * the CPU may possibly enter into low power mode. This way we can
5237          * notice an extended quiescent state to other CPUs that started a grace
5238          * period. Otherwise we would delay any grace period as long as we run
5239          * in the idle task.
5240          *
5241          * So complain bitterly if someone does call rcu_read_lock(),
5242          * rcu_read_lock_bh() and so on from extended quiescent states.
5243          */
5244         if (!rcu_is_watching())
5245                 pr_warn("RCU used illegally from extended quiescent state!\n");
5246
5247         lockdep_print_held_locks(curr);
5248         pr_warn("\nstack backtrace:\n");
5249         dump_stack();
5250 }
5251 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);