1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10  *
11  * This code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/delay.h>
32 #include <linux/module.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/kallsyms.h>
37 #include <linux/interrupt.h>
38 #include <linux/stacktrace.h>
39 #include <linux/debug_locks.h>
40 #include <linux/irqflags.h>
41 #include <linux/utsname.h>
42 #include <linux/hash.h>
43 #include <linux/ftrace.h>
44 #include <linux/stringify.h>
45 #include <trace/lockdep.h>
46
47 #include <asm/sections.h>
48
49 #include "lockdep_internals.h"
50
51 #ifdef CONFIG_PROVE_LOCKING
52 int prove_locking = 1;
53 module_param(prove_locking, int, 0644);
54 #else
55 #define prove_locking 0
56 #endif
57
58 #ifdef CONFIG_LOCK_STAT
59 int lock_stat = 1;
60 module_param(lock_stat, int, 0644);
61 #else
62 #define lock_stat 0
63 #endif
64
65 /*
66  * lockdep_lock: protects the lockdep graph, the hashes and the
67  *               class/list/hash allocators.
68  *
69  * This is one of the rare exceptions where it's justified
70  * to use a raw spinlock - we really don't want the spinlock
71  * code to recurse back into the lockdep code...
72  */
73 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
74
75 static int graph_lock(void)
76 {
77         __raw_spin_lock(&lockdep_lock);
78         /*
79          * Make sure that if another CPU detected a bug while
80          * walking the graph we don't change it (while the other
81          * CPU is busy printing out stuff with the graph lock
82          * dropped already)
83          */
84         if (!debug_locks) {
85                 __raw_spin_unlock(&lockdep_lock);
86                 return 0;
87         }
88         /* prevent any recursions within lockdep from causing deadlocks */
89         current->lockdep_recursion++;
90         return 1;
91 }
92
93 static inline int graph_unlock(void)
94 {
95         if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
96                 return DEBUG_LOCKS_WARN_ON(1);
97
98         current->lockdep_recursion--;
99         __raw_spin_unlock(&lockdep_lock);
100         return 0;
101 }
102
103 /*
104  * Turn lock debugging off and return 0 if it was off already,
105  * and also release the graph lock:
106  */
107 static inline int debug_locks_off_graph_unlock(void)
108 {
109         int ret = debug_locks_off();
110
111         __raw_spin_unlock(&lockdep_lock);
112
113         return ret;
114 }
115
116 static int lockdep_initialized;
117
118 unsigned long nr_list_entries;
119 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
120
121 /*
122  * All data structures here are protected by the global debug_lock.
123  *
124  * Mutex key structs only get allocated once, during bootup, and never
125  * get freed - this significantly simplifies the debugging code.
126  */
127 unsigned long nr_lock_classes;
128 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
129
130 static inline struct lock_class *hlock_class(struct held_lock *hlock)
131 {
132         if (!hlock->class_idx) {
133                 DEBUG_LOCKS_WARN_ON(1);
134                 return NULL;
135         }
136         return lock_classes + hlock->class_idx - 1;
137 }
138
139 #ifdef CONFIG_LOCK_STAT
140 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
141
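/*
 * Find the slot recording contention point @ip, claiming a free slot
 * for it if needed.  Returns the slot index, or LOCKSTAT_POINTS when
 * the array is full and @ip is not present - callers must bounds-check
 * the returned index before using it.
 */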
142 static int lock_point(unsigned long points[], unsigned long ip)
143 {
144         int i;
145
146         for (i = 0; i < LOCKSTAT_POINTS; i++) {
147                 if (points[i] == 0) {
148                         points[i] = ip;
149                         break;
150                 }
151                 if (points[i] == ip)
152                         break;
153         }
154
155         return i;
156 }
157
158 static void lock_time_inc(struct lock_time *lt, s64 time)
159 {
160         if (time > lt->max)
161                 lt->max = time;
162
163         if (time < lt->min || !lt->min)
164                 lt->min = time;
165
166         lt->total += time;
167         lt->nr++;
168 }
169
170 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
171 {
172         dst->min += src->min;
173         dst->max += src->max;
174         dst->total += src->total;
175         dst->nr += src->nr;
176 }
177
178 struct lock_class_stats lock_stats(struct lock_class *class)
179 {
180         struct lock_class_stats stats;
181         int cpu, i;
182
183         memset(&stats, 0, sizeof(struct lock_class_stats));
184         for_each_possible_cpu(cpu) {
185                 struct lock_class_stats *pcs =
186                         &per_cpu(lock_stats, cpu)[class - lock_classes];
187
188                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
189                         stats.contention_point[i] += pcs->contention_point[i];
190
191                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
192                         stats.contending_point[i] += pcs->contending_point[i];
193
194                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
195                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
196
197                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
198                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
199
200                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
201                         stats.bounces[i] += pcs->bounces[i];
202         }
203
204         return stats;
205 }
206
207 void clear_lock_stats(struct lock_class *class)
208 {
209         int cpu;
210
211         for_each_possible_cpu(cpu) {
212                 struct lock_class_stats *cpu_stats =
213                         &per_cpu(lock_stats, cpu)[class - lock_classes];
214
215                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
216         }
217         memset(class->contention_point, 0, sizeof(class->contention_point));
218         memset(class->contending_point, 0, sizeof(class->contending_point));
219 }
220
221 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
222 {
223         return &get_cpu_var(lock_stats)[class - lock_classes];
224 }
225
226 static void put_lock_stats(struct lock_class_stats *stats)
227 {
228         put_cpu_var(lock_stats);
229 }
230
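/*
 * Account the hold time of a lock being released into its class's
 * per-cpu statistics, split into read-side and write-side hold time.
 */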
231 static void lock_release_holdtime(struct held_lock *hlock)
232 {
233         struct lock_class_stats *stats;
234         s64 holdtime;
235
236         if (!lock_stat)
237                 return;
238
239         holdtime = sched_clock() - hlock->holdtime_stamp;
240
241         stats = get_lock_stats(hlock_class(hlock));
242         if (hlock->read)
243                 lock_time_inc(&stats->read_holdtime, holdtime);
244         else
245                 lock_time_inc(&stats->write_holdtime, holdtime);
246         put_lock_stats(stats);
247 }
248 #else
249 static inline void lock_release_holdtime(struct held_lock *hlock)
250 {
251 }
252 #endif
253
254 /*
255  * We keep a global list of all lock classes. The list only grows,
256  * never shrinks. The list is only accessed with the lockdep
257  * spinlock held.
258  */
259 LIST_HEAD(all_lock_classes);
260
261 /*
262  * The lockdep classes are in a hash-table as well, for fast lookup:
263  */
264 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
265 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
266 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
267 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
268
269 static struct list_head classhash_table[CLASSHASH_SIZE];
270
271 /*
272  * We put the lock dependency chains into a hash-table as well, to cache
273  * their existence:
274  */
275 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
276 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
277 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
278 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
279
280 static struct list_head chainhash_table[CHAINHASH_SIZE];
281
282 /*
283  * The hash key of the lock dependency chains is a hash itself too:
284  * it's a hash of all locks taken up to that lock, including that lock.
285  * It's a 64-bit hash, because it's important for the keys to be
286  * unique.
287  */
288 #define iterate_chain_key(key1, key2) \
289         (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
290         ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
291         (key2))
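/*
 * Illustrative sketch (A_idx and B_idx are just placeholders for two
 * classes' indices): a task's chain key starts out at 0 and is extended
 * with each acquired lock, so acquiring class A and then class B yields
 *
 *	chain_key = iterate_chain_key(0, A_idx);
 *	chain_key = iterate_chain_key(chain_key, B_idx);
 *
 * which means identical lock sequences hash to the same chain, while
 * A-then-B and B-then-A hash differently.
 */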
292
293 void lockdep_off(void)
294 {
295         current->lockdep_recursion++;
296 }
297 EXPORT_SYMBOL(lockdep_off);
298
299 void lockdep_on(void)
300 {
301         current->lockdep_recursion--;
302 }
303 EXPORT_SYMBOL(lockdep_on);
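/*
 * Both helpers just bump/drop the per-task recursion counter, so they
 * nest: lockdep stays disabled for this task until every lockdep_off()
 * has been paired with a lockdep_on().  A minimal illustrative sketch
 * (the surrounding code is hypothetical):
 *
 *	lockdep_off();
 *	... code that must not be tracked by lockdep ...
 *	lockdep_on();
 */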
304
305 /*
306  * Debugging switches:
307  */
308
309 #define VERBOSE                 0
310 #define VERY_VERBOSE            0
311
312 #if VERBOSE
313 # define HARDIRQ_VERBOSE        1
314 # define SOFTIRQ_VERBOSE        1
315 # define RECLAIM_VERBOSE        1
316 #else
317 # define HARDIRQ_VERBOSE        0
318 # define SOFTIRQ_VERBOSE        0
319 # define RECLAIM_VERBOSE        0
320 #endif
321
322 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
323 /*
324  * Quick filtering for interesting events:
325  */
326 static int class_filter(struct lock_class *class)
327 {
328 #if 0
329         /* Example */
330         if (class->name_version == 1 &&
331                         !strcmp(class->name, "lockname"))
332                 return 1;
333         if (class->name_version == 1 &&
334                         !strcmp(class->name, "&struct->lockfield"))
335                 return 1;
336 #endif
337         /* Filter everything else. Returning 1 here would allow everything else. */
338         return 0;
339 }
340 #endif
341
342 static int verbose(struct lock_class *class)
343 {
344 #if VERBOSE
345         return class_filter(class);
346 #endif
347         return 0;
348 }
349
350 /*
351  * Stack-trace: tightly packed array of stack backtrace
352  * addresses. Protected by the graph_lock.
353  */
354 unsigned long nr_stack_trace_entries;
355 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
356
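/*
 * Snapshot the current stack into the global stack_trace[] array.
 * Returns 1 on success; once the array is exhausted the validator is
 * turned off (with the graph lock dropped) and 0 is returned.
 */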
357 static int save_trace(struct stack_trace *trace)
358 {
359         trace->nr_entries = 0;
360         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
361         trace->entries = stack_trace + nr_stack_trace_entries;
362
363         trace->skip = 3;
364
365         save_stack_trace(trace);
366
367         trace->max_entries = trace->nr_entries;
368
369         nr_stack_trace_entries += trace->nr_entries;
370
371         if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
372                 if (!debug_locks_off_graph_unlock())
373                         return 0;
374
375                 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
376                 printk("turning off the locking correctness validator.\n");
377                 dump_stack();
378
379                 return 0;
380         }
381
382         return 1;
383 }
384
385 unsigned int nr_hardirq_chains;
386 unsigned int nr_softirq_chains;
387 unsigned int nr_process_chains;
388 unsigned int max_lockdep_depth;
389 unsigned int max_recursion_depth;
390
391 static unsigned int lockdep_dependency_gen_id;
392
393 static bool lockdep_dependency_visit(struct lock_class *source,
394                                      unsigned int depth)
395 {
396         if (!depth)
397                 lockdep_dependency_gen_id++;
398         if (source->dep_gen_id == lockdep_dependency_gen_id)
399                 return true;
400         source->dep_gen_id = lockdep_dependency_gen_id;
401         return false;
402 }
403
404 #ifdef CONFIG_DEBUG_LOCKDEP
405 /*
406  * We cannot printk in early bootup code - not even early_printk()
407  * may work. So we mark any initialization errors and printk
408  * about them later on, in lockdep_info().
409  */
410 static int lockdep_init_error;
411 static unsigned long lockdep_init_trace_data[20];
412 static struct stack_trace lockdep_init_trace = {
413         .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
414         .entries = lockdep_init_trace_data,
415 };
416
417 /*
418  * Various lockdep statistics:
419  */
420 atomic_t chain_lookup_hits;
421 atomic_t chain_lookup_misses;
422 atomic_t hardirqs_on_events;
423 atomic_t hardirqs_off_events;
424 atomic_t redundant_hardirqs_on;
425 atomic_t redundant_hardirqs_off;
426 atomic_t softirqs_on_events;
427 atomic_t softirqs_off_events;
428 atomic_t redundant_softirqs_on;
429 atomic_t redundant_softirqs_off;
430 atomic_t nr_unused_locks;
431 atomic_t nr_cyclic_checks;
432 atomic_t nr_cyclic_check_recursions;
433 atomic_t nr_find_usage_forwards_checks;
434 atomic_t nr_find_usage_forwards_recursions;
435 atomic_t nr_find_usage_backwards_checks;
436 atomic_t nr_find_usage_backwards_recursions;
437 #endif
438
439 /*
440  * Locking printouts:
441  */
442
443 #define __USAGE(__STATE)                                                \
444         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
445         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
446         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
447         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
448
449 static const char *usage_str[] =
450 {
451 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
452 #include "lockdep_states.h"
453 #undef LOCKDEP_STATE
454         [LOCK_USED] = "INITIAL USE",
455 };
456
457 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
458 {
459         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
460 }
461
462 static inline unsigned long lock_flag(enum lock_usage_bit bit)
463 {
464         return 1UL << bit;
465 }
466
467 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
468 {
469         char c = '.';
470
471         if (class->usage_mask & lock_flag(bit + 2))
472                 c = '+';
473         if (class->usage_mask & lock_flag(bit)) {
474                 c = '-';
475                 if (class->usage_mask & lock_flag(bit + 2))
476                         c = '?';
477         }
478
479         return c;
480 }
481
482 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
483 {
484         int i = 0;
485
486 #define LOCKDEP_STATE(__STATE)                                          \
487         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
488         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
489 #include "lockdep_states.h"
490 #undef LOCKDEP_STATE
491
492         usage[i] = '\0';
493 }
494
495 static void print_lock_name(struct lock_class *class)
496 {
497         char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
498         const char *name;
499
500         get_usage_chars(class, usage);
501
502         name = class->name;
503         if (!name) {
504                 name = __get_key_name(class->key, str);
505                 printk(" (%s", name);
506         } else {
507                 printk(" (%s", name);
508                 if (class->name_version > 1)
509                         printk("#%d", class->name_version);
510                 if (class->subclass)
511                         printk("/%d", class->subclass);
512         }
513         printk("){%s}", usage);
514 }
515
516 static void print_lockdep_cache(struct lockdep_map *lock)
517 {
518         const char *name;
519         char str[KSYM_NAME_LEN];
520
521         name = lock->name;
522         if (!name)
523                 name = __get_key_name(lock->key->subkeys, str);
524
525         printk("%s", name);
526 }
527
528 static void print_lock(struct held_lock *hlock)
529 {
530         print_lock_name(hlock_class(hlock));
531         printk(", at: ");
532         print_ip_sym(hlock->acquire_ip);
533 }
534
535 static void lockdep_print_held_locks(struct task_struct *curr)
536 {
537         int i, depth = curr->lockdep_depth;
538
539         if (!depth) {
540                 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
541                 return;
542         }
543         printk("%d lock%s held by %s/%d:\n",
544                 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
545
546         for (i = 0; i < depth; i++) {
547                 printk(" #%d: ", i);
548                 print_lock(curr->held_locks + i);
549         }
550 }
551
552 static void print_lock_class_header(struct lock_class *class, int depth)
553 {
554         int bit;
555
556         printk("%*s->", depth, "");
557         print_lock_name(class);
558         printk(" ops: %lu", class->ops);
559         printk(" {\n");
560
561         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
562                 if (class->usage_mask & (1 << bit)) {
563                         int len = depth;
564
565                         len += printk("%*s   %s", depth, "", usage_str[bit]);
566                         len += printk(" at:\n");
567                         print_stack_trace(class->usage_traces + bit, len);
568                 }
569         }
570         printk("%*s }\n", depth, "");
571
572         printk("%*s ... key      at: ", depth, "");
573         print_ip_sym((unsigned long)class->key);
574 }
575
576 /*
577  * printk all lock dependencies starting at <entry>:
578  */
579 static void __used
580 print_lock_dependencies(struct lock_class *class, int depth)
581 {
582         struct lock_list *entry;
583
584         if (lockdep_dependency_visit(class, depth))
585                 return;
586
587         if (DEBUG_LOCKS_WARN_ON(depth >= 20))
588                 return;
589
590         print_lock_class_header(class, depth);
591
592         list_for_each_entry(entry, &class->locks_after, entry) {
593                 if (DEBUG_LOCKS_WARN_ON(!entry->class))
594                         return;
595
596                 print_lock_dependencies(entry->class, depth + 1);
597
598                 printk("%*s ... acquired at:\n", depth, "");
599                 print_stack_trace(&entry->trace, 2);
600                 printk("\n");
601         }
602 }
603
604 static void print_kernel_version(void)
605 {
606         printk("%s %.*s\n", init_utsname()->release,
607                 (int)strcspn(init_utsname()->version, " "),
608                 init_utsname()->version);
609 }
610
611 static int very_verbose(struct lock_class *class)
612 {
613 #if VERY_VERBOSE
614         return class_filter(class);
615 #endif
616         return 0;
617 }
618
619 /*
620  * Is this the address of a static object:
621  */
622 static int static_obj(void *obj)
623 {
624         unsigned long start = (unsigned long) &_stext,
625                       end   = (unsigned long) &_end,
626                       addr  = (unsigned long) obj;
627 #ifdef CONFIG_SMP
628         int i;
629 #endif
630
631         /*
632          * static variable?
633          */
634         if ((addr >= start) && (addr < end))
635                 return 1;
636
637 #ifdef CONFIG_SMP
638         /*
639          * percpu var?
640          */
641         for_each_possible_cpu(i) {
642                 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
643                 end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
644                                         + per_cpu_offset(i);
645
646                 if ((addr >= start) && (addr < end))
647                         return 1;
648         }
649 #endif
650
651         /*
652          * module var?
653          */
654         return is_module_address(addr);
655 }
656
657 /*
658  * To make lock name printouts unique, we calculate a per-name
659  * class->name_version generation counter:
660  */
661 static int count_matching_names(struct lock_class *new_class)
662 {
663         struct lock_class *class;
664         int count = 0;
665
666         if (!new_class->name)
667                 return 0;
668
669         list_for_each_entry(class, &all_lock_classes, lock_entry) {
670                 if (new_class->key - new_class->subclass == class->key)
671                         return class->name_version;
672                 if (class->name && !strcmp(class->name, new_class->name))
673                         count = max(count, class->name_version);
674         }
675
676         return count + 1;
677 }
678
679 /*
680  * Look up a lock's class in the hash-table. Returns the class if it has
681  * already been registered, NULL otherwise - registering a new class is
682  * done by register_lock_class() below.
683  */
684 static inline struct lock_class *
685 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
686 {
687         struct lockdep_subclass_key *key;
688         struct list_head *hash_head;
689         struct lock_class *class;
690
691 #ifdef CONFIG_DEBUG_LOCKDEP
692         /*
693          * If the architecture calls into lockdep before initializing
694          * the hashes then we'll warn about it later. (we cannot printk
695          * right now)
696          */
697         if (unlikely(!lockdep_initialized)) {
698                 lockdep_init();
699                 lockdep_init_error = 1;
700                 save_stack_trace(&lockdep_init_trace);
701         }
702 #endif
703
704         /*
705          * Static locks do not have their class-keys yet - for them the key
706          * is the lock object itself:
707          */
708         if (unlikely(!lock->key))
709                 lock->key = (void *)lock;
710
711         /*
712          * NOTE: the class-key must be unique. For dynamic locks, a static
713          * lock_class_key variable is passed in through the mutex_init()
714          * (or spin_lock_init()) call - which acts as the key. For static
715          * locks we use the lock object itself as the key.
716          */
717         BUILD_BUG_ON(sizeof(struct lock_class_key) >
718                         sizeof(struct lockdep_map));
719
720         key = lock->key->subkeys + subclass;
721
722         hash_head = classhashentry(key);
723
724         /*
725          * We can walk the hash lockfree, because the hash only
726          * grows, and we are careful when adding entries to the end:
727          */
728         list_for_each_entry(class, hash_head, hash_entry) {
729                 if (class->key == key) {
730                         WARN_ON_ONCE(class->name != lock->name);
731                         return class;
732                 }
733         }
734
735         return NULL;
736 }
737
738 /*
739  * Register a lock's class in the hash-table, if the class is not present
740  * yet. Otherwise we look it up. We cache the result in the lock object
741  * itself, so actual lookup of the hash should be once per lock object.
742  */
743 static inline struct lock_class *
744 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
745 {
746         struct lockdep_subclass_key *key;
747         struct list_head *hash_head;
748         struct lock_class *class;
749         unsigned long flags;
750
751         class = look_up_lock_class(lock, subclass);
752         if (likely(class))
753                 return class;
754
755         /*
756          * Debug-check: all keys must be persistent!
757          */
758         if (!static_obj(lock->key)) {
759                 debug_locks_off();
760                 printk("INFO: trying to register non-static key.\n");
761                 printk("the code is fine but needs lockdep annotation.\n");
762                 printk("turning off the locking correctness validator.\n");
763                 dump_stack();
764
765                 return NULL;
766         }
767
768         key = lock->key->subkeys + subclass;
769         hash_head = classhashentry(key);
770
771         raw_local_irq_save(flags);
772         if (!graph_lock()) {
773                 raw_local_irq_restore(flags);
774                 return NULL;
775         }
776         /*
777          * We have to do the hash-walk again, to avoid races
778          * with another CPU:
779          */
780         list_for_each_entry(class, hash_head, hash_entry)
781                 if (class->key == key)
782                         goto out_unlock_set;
783         /*
784          * Allocate a new key from the static array, and add it to
785          * the hash:
786          */
787         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
788                 if (!debug_locks_off_graph_unlock()) {
789                         raw_local_irq_restore(flags);
790                         return NULL;
791                 }
792                 raw_local_irq_restore(flags);
793
794                 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
795                 printk("turning off the locking correctness validator.\n");
796                 return NULL;
797         }
798         class = lock_classes + nr_lock_classes++;
799         debug_atomic_inc(&nr_unused_locks);
800         class->key = key;
801         class->name = lock->name;
802         class->subclass = subclass;
803         INIT_LIST_HEAD(&class->lock_entry);
804         INIT_LIST_HEAD(&class->locks_before);
805         INIT_LIST_HEAD(&class->locks_after);
806         class->name_version = count_matching_names(class);
807         /*
808          * We use RCU's safe list-add method to make
809          * parallel walking of the hash-list safe:
810          */
811         list_add_tail_rcu(&class->hash_entry, hash_head);
812         /*
813          * Add it to the global list of classes:
814          */
815         list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
816
817         if (verbose(class)) {
818                 graph_unlock();
819                 raw_local_irq_restore(flags);
820
821                 printk("\nnew class %p: %s", class->key, class->name);
822                 if (class->name_version > 1)
823                         printk("#%d", class->name_version);
824                 printk("\n");
825                 dump_stack();
826
827                 raw_local_irq_save(flags);
828                 if (!graph_lock()) {
829                         raw_local_irq_restore(flags);
830                         return NULL;
831                 }
832         }
833 out_unlock_set:
834         graph_unlock();
835         raw_local_irq_restore(flags);
836
837         if (!subclass || force)
838                 lock->class_cache = class;
839
840         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
841                 return NULL;
842
843         return class;
844 }
845
846 #ifdef CONFIG_PROVE_LOCKING
847 /*
848  * Allocate a lockdep entry. (assumes the graph_lock is held, returns
849  * NULL on failure)
850  */
851 static struct lock_list *alloc_list_entry(void)
852 {
853         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
854                 if (!debug_locks_off_graph_unlock())
855                         return NULL;
856
857                 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
858                 printk("turning off the locking correctness validator.\n");
859                 return NULL;
860         }
861         return list_entries + nr_list_entries++;
862 }
863
864 /*
865  * Add a new dependency to the head of the list:
866  */
867 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
868                             struct list_head *head, unsigned long ip, int distance)
869 {
870         struct lock_list *entry;
871         /*
872          * Lock not present yet - get a new dependency struct and
873          * add it to the list:
874          */
875         entry = alloc_list_entry();
876         if (!entry)
877                 return 0;
878
879         if (!save_trace(&entry->trace))
880                 return 0;
881
882         entry->class = this;
883         entry->distance = distance;
884         /*
885          * Since we never remove from the dependency list, the list can
886          * be walked locklessly by other CPUs; it's only the allocation
887          * that must be protected by the spinlock. But this also means
888          * we must make new entries visible only once writes to the
889          * entry become visible - hence the RCU op:
890          */
891         list_add_tail_rcu(&entry->entry, head);
892
893         return 1;
894 }
895
896 /*
897  * Recursive, forwards-direction lock-dependency checking, used for
898  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
899  * checking.
900  *
901  * (to keep the stackframe of the recursive functions small we
902  *  use these global variables, and we also mark various helper
903  *  functions as noinline.)
904  */
905 static struct held_lock *check_source, *check_target;
906
907 /*
908  * Print a dependency chain entry (this is only done when a deadlock
909  * has been detected):
910  */
911 static noinline int
912 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
913 {
914         if (debug_locks_silent)
915                 return 0;
916         printk("\n-> #%u", depth);
917         print_lock_name(target->class);
918         printk(":\n");
919         print_stack_trace(&target->trace, 6);
920
921         return 0;
922 }
923
924 /*
925  * When a circular dependency is detected, print the
926  * header first:
927  */
928 static noinline int
929 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
930 {
931         struct task_struct *curr = current;
932
933         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
934                 return 0;
935
936         printk("\n=======================================================\n");
937         printk(  "[ INFO: possible circular locking dependency detected ]\n");
938         print_kernel_version();
939         printk(  "-------------------------------------------------------\n");
940         printk("%s/%d is trying to acquire lock:\n",
941                 curr->comm, task_pid_nr(curr));
942         print_lock(check_source);
943         printk("\nbut task is already holding lock:\n");
944         print_lock(check_target);
945         printk("\nwhich lock already depends on the new lock.\n\n");
946         printk("\nthe existing dependency chain (in reverse order) is:\n");
947
948         print_circular_bug_entry(entry, depth);
949
950         return 0;
951 }
952
953 static noinline int print_circular_bug_tail(void)
954 {
955         struct task_struct *curr = current;
956         struct lock_list this;
957
958         if (debug_locks_silent)
959                 return 0;
960
961         this.class = hlock_class(check_source);
962         if (!save_trace(&this.trace))
963                 return 0;
964
965         print_circular_bug_entry(&this, 0);
966
967         printk("\nother info that might help us debug this:\n\n");
968         lockdep_print_held_locks(curr);
969
970         printk("\nstack backtrace:\n");
971         dump_stack();
972
973         return 0;
974 }
975
976 #define RECURSION_LIMIT 40
977
978 static int noinline print_infinite_recursion_bug(void)
979 {
980         if (!debug_locks_off_graph_unlock())
981                 return 0;
982
983         WARN_ON(1);
984
985         return 0;
986 }
987
988 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
989                                            unsigned int depth)
990 {
991         struct lock_list *entry;
992         unsigned long ret = 1;
993
994         if (lockdep_dependency_visit(class, depth))
995                 return 0;
996
997         /*
998          * Recurse this class's dependency list:
999          */
1000         list_for_each_entry(entry, &class->locks_after, entry)
1001                 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1002
1003         return ret;
1004 }
1005
1006 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1007 {
1008         unsigned long ret, flags;
1009
1010         local_irq_save(flags);
1011         __raw_spin_lock(&lockdep_lock);
1012         ret = __lockdep_count_forward_deps(class, 0);
1013         __raw_spin_unlock(&lockdep_lock);
1014         local_irq_restore(flags);
1015
1016         return ret;
1017 }
1018
1019 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1020                                             unsigned int depth)
1021 {
1022         struct lock_list *entry;
1023         unsigned long ret = 1;
1024
1025         if (lockdep_dependency_visit(class, depth))
1026                 return 0;
1027         /*
1028          * Recurse this class's dependency list:
1029          */
1030         list_for_each_entry(entry, &class->locks_before, entry)
1031                 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1032
1033         return ret;
1034 }
1035
1036 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1037 {
1038         unsigned long ret, flags;
1039
1040         local_irq_save(flags);
1041         __raw_spin_lock(&lockdep_lock);
1042         ret = __lockdep_count_backward_deps(class, 0);
1043         __raw_spin_unlock(&lockdep_lock);
1044         local_irq_restore(flags);
1045
1046         return ret;
1047 }
1048
1049 /*
1050  * Prove that the dependency graph starting at <entry> cannot
1051  * lead to <target>. Print an error and return 0 if it does.
1052  */
1053 static noinline int
1054 check_noncircular(struct lock_class *source, unsigned int depth)
1055 {
1056         struct lock_list *entry;
1057
1058         if (lockdep_dependency_visit(source, depth))
1059                 return 1;
1060
1061         debug_atomic_inc(&nr_cyclic_check_recursions);
1062         if (depth > max_recursion_depth)
1063                 max_recursion_depth = depth;
1064         if (depth >= RECURSION_LIMIT)
1065                 return print_infinite_recursion_bug();
1066         /*
1067          * Check this lock's dependency list:
1068          */
1069         list_for_each_entry(entry, &source->locks_after, entry) {
1070                 if (entry->class == hlock_class(check_target))
1071                         return print_circular_bug_header(entry, depth+1);
1072                 debug_atomic_inc(&nr_cyclic_checks);
1073                 if (!check_noncircular(entry->class, depth+1))
1074                         return print_circular_bug_entry(entry, depth+1);
1075         }
1076         return 1;
1077 }
1078
1079 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1080 /*
1081  * Forwards and backwards subgraph searching, for the purposes of
1082  * proving that two subgraphs can be connected by a new dependency
1083  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1084  */
1085 static enum lock_usage_bit find_usage_bit;
1086 static struct lock_class *forwards_match, *backwards_match;
1087
1088 /*
1089  * Find a node in the forwards-direction dependency sub-graph starting
1090  * at <source> that matches <find_usage_bit>.
1091  *
1092  * Return 2 if such a node exists in the subgraph, and put that node
1093  * into <forwards_match>.
1094  *
1095  * Return 1 otherwise and keep <forwards_match> unchanged.
1096  * Return 0 on error.
1097  */
1098 static noinline int
1099 find_usage_forwards(struct lock_class *source, unsigned int depth)
1100 {
1101         struct lock_list *entry;
1102         int ret;
1103
1104         if (lockdep_dependency_visit(source, depth))
1105                 return 1;
1106
1107         if (depth > max_recursion_depth)
1108                 max_recursion_depth = depth;
1109         if (depth >= RECURSION_LIMIT)
1110                 return print_infinite_recursion_bug();
1111
1112         debug_atomic_inc(&nr_find_usage_forwards_checks);
1113         if (source->usage_mask & (1 << find_usage_bit)) {
1114                 forwards_match = source;
1115                 return 2;
1116         }
1117
1118         /*
1119          * Check this lock's dependency list:
1120          */
1121         list_for_each_entry(entry, &source->locks_after, entry) {
1122                 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1123                 ret = find_usage_forwards(entry->class, depth+1);
1124                 if (ret == 2 || ret == 0)
1125                         return ret;
1126         }
1127         return 1;
1128 }
1129
1130 /*
1131  * Find a node in the backwards-direction dependency sub-graph starting
1132  * at <source> that matches <find_usage_bit>.
1133  *
1134  * Return 2 if such a node exists in the subgraph, and put that node
1135  * into <backwards_match>.
1136  *
1137  * Return 1 otherwise and keep <backwards_match> unchanged.
1138  * Return 0 on error.
1139  */
1140 static noinline int
1141 find_usage_backwards(struct lock_class *source, unsigned int depth)
1142 {
1143         struct lock_list *entry;
1144         int ret;
1145
1146         if (lockdep_dependency_visit(source, depth))
1147                 return 1;
1148
1149         if (!__raw_spin_is_locked(&lockdep_lock))
1150                 return DEBUG_LOCKS_WARN_ON(1);
1151
1152         if (depth > max_recursion_depth)
1153                 max_recursion_depth = depth;
1154         if (depth >= RECURSION_LIMIT)
1155                 return print_infinite_recursion_bug();
1156
1157         debug_atomic_inc(&nr_find_usage_backwards_checks);
1158         if (source->usage_mask & (1 << find_usage_bit)) {
1159                 backwards_match = source;
1160                 return 2;
1161         }
1162
1163         if (!source && debug_locks_off_graph_unlock()) {
1164                 WARN_ON(1);
1165                 return 0;
1166         }
1167
1168         /*
1169          * Check this lock's dependency list:
1170          */
1171         list_for_each_entry(entry, &source->locks_before, entry) {
1172                 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1173                 ret = find_usage_backwards(entry->class, depth+1);
1174                 if (ret == 2 || ret == 0)
1175                         return ret;
1176         }
1177         return 1;
1178 }
1179
1180 static int
1181 print_bad_irq_dependency(struct task_struct *curr,
1182                          struct held_lock *prev,
1183                          struct held_lock *next,
1184                          enum lock_usage_bit bit1,
1185                          enum lock_usage_bit bit2,
1186                          const char *irqclass)
1187 {
1188         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1189                 return 0;
1190
1191         printk("\n======================================================\n");
1192         printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1193                 irqclass, irqclass);
1194         print_kernel_version();
1195         printk(  "------------------------------------------------------\n");
1196         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1197                 curr->comm, task_pid_nr(curr),
1198                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1199                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1200                 curr->hardirqs_enabled,
1201                 curr->softirqs_enabled);
1202         print_lock(next);
1203
1204         printk("\nand this task is already holding:\n");
1205         print_lock(prev);
1206         printk("which would create a new lock dependency:\n");
1207         print_lock_name(hlock_class(prev));
1208         printk(" ->");
1209         print_lock_name(hlock_class(next));
1210         printk("\n");
1211
1212         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1213                 irqclass);
1214         print_lock_name(backwards_match);
1215         printk("\n... which became %s-irq-safe at:\n", irqclass);
1216
1217         print_stack_trace(backwards_match->usage_traces + bit1, 1);
1218
1219         printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1220         print_lock_name(forwards_match);
1221         printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1222         printk("...");
1223
1224         print_stack_trace(forwards_match->usage_traces + bit2, 1);
1225
1226         printk("\nother info that might help us debug this:\n\n");
1227         lockdep_print_held_locks(curr);
1228
1229         printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1230         print_lock_dependencies(backwards_match, 0);
1231
1232         printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1233         print_lock_dependencies(forwards_match, 0);
1234
1235         printk("\nstack backtrace:\n");
1236         dump_stack();
1237
1238         return 0;
1239 }
1240
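/*
 * Prove that adding the <prev> -> <next> dependency would not connect a
 * lock carrying <bit_backwards> anywhere in <prev>'s backwards subgraph
 * with a lock carrying <bit_forwards> anywhere in <next>'s forwards
 * subgraph.  Returns 1 if no such pair exists, 0 (after printing a
 * report) if one is found or on internal error.
 */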
1241 static int
1242 check_usage(struct task_struct *curr, struct held_lock *prev,
1243             struct held_lock *next, enum lock_usage_bit bit_backwards,
1244             enum lock_usage_bit bit_forwards, const char *irqclass)
1245 {
1246         int ret;
1247
1248         find_usage_bit = bit_backwards;
1249         /* fills in <backwards_match> */
1250         ret = find_usage_backwards(hlock_class(prev), 0);
1251         if (!ret || ret == 1)
1252                 return ret;
1253
1254         find_usage_bit = bit_forwards;
1255         ret = find_usage_forwards(hlock_class(next), 0);
1256         if (!ret || ret == 1)
1257                 return ret;
1258         /* ret == 2 */
1259         return print_bad_irq_dependency(curr, prev, next,
1260                         bit_backwards, bit_forwards, irqclass);
1261 }
1262
1263 static const char *state_names[] = {
1264 #define LOCKDEP_STATE(__STATE) \
1265         __stringify(__STATE),
1266 #include "lockdep_states.h"
1267 #undef LOCKDEP_STATE
1268 };
1269
1270 static const char *state_rnames[] = {
1271 #define LOCKDEP_STATE(__STATE) \
1272         __stringify(__STATE)"-READ",
1273 #include "lockdep_states.h"
1274 #undef LOCKDEP_STATE
1275 };
1276
1277 static inline const char *state_name(enum lock_usage_bit bit)
1278 {
1279         return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1280 }
1281
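/*
 * exclusive_bit() maps a usage bit to the bit that must not appear on
 * the other end of the new dependency: the direction is flipped and the
 * READ qualifier is stripped.  For example:
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ) == LOCK_USED_IN_SOFTIRQ
 */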
1282 static int exclusive_bit(int new_bit)
1283 {
1284         /*
1285          * USED_IN
1286          * USED_IN_READ
1287          * ENABLED
1288          * ENABLED_READ
1289          *
1290          * bit 0 - write/read
1291          * bit 1 - used_in/enabled
1292          * bit 2+ - state
1293          */
1294
1295         int state = new_bit & ~3;
1296         int dir = new_bit & 2;
1297
1298         /*
1299          * keep state, bit flip the direction and strip read.
1300          */
1301         return state | (dir ^ 2);
1302 }
1303
1304 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1305                            struct held_lock *next, enum lock_usage_bit bit)
1306 {
1307         /*
1308          * Prove that the new dependency does not connect a hardirq-safe
1309          * lock with a hardirq-unsafe lock - to achieve this we search
1310          * the backwards-subgraph starting at <prev>, and the
1311          * forwards-subgraph starting at <next>:
1312          */
1313         if (!check_usage(curr, prev, next, bit,
1314                            exclusive_bit(bit), state_name(bit)))
1315                 return 0;
1316
1317         bit++; /* _READ */
1318
1319         /*
1320          * Prove that the new dependency does not connect a hardirq-safe-read
1321          * lock with a hardirq-unsafe lock - to achieve this we search
1322          * the backwards-subgraph starting at <prev>, and the
1323          * forwards-subgraph starting at <next>:
1324          */
1325         if (!check_usage(curr, prev, next, bit,
1326                            exclusive_bit(bit), state_name(bit)))
1327                 return 0;
1328
1329         return 1;
1330 }
1331
1332 static int
1333 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1334                 struct held_lock *next)
1335 {
1336 #define LOCKDEP_STATE(__STATE)                                          \
1337         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1338                 return 0;
1339 #include "lockdep_states.h"
1340 #undef LOCKDEP_STATE
1341
1342         return 1;
1343 }
1344
1345 static void inc_chains(void)
1346 {
1347         if (current->hardirq_context)
1348                 nr_hardirq_chains++;
1349         else {
1350                 if (current->softirq_context)
1351                         nr_softirq_chains++;
1352                 else
1353                         nr_process_chains++;
1354         }
1355 }
1356
1357 #else
1358
1359 static inline int
1360 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1361                 struct held_lock *next)
1362 {
1363         return 1;
1364 }
1365
1366 static inline void inc_chains(void)
1367 {
1368         nr_process_chains++;
1369 }
1370
1371 #endif
1372
1373 static int
1374 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1375                    struct held_lock *next)
1376 {
1377         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1378                 return 0;
1379
1380         printk("\n=============================================\n");
1381         printk(  "[ INFO: possible recursive locking detected ]\n");
1382         print_kernel_version();
1383         printk(  "---------------------------------------------\n");
1384         printk("%s/%d is trying to acquire lock:\n",
1385                 curr->comm, task_pid_nr(curr));
1386         print_lock(next);
1387         printk("\nbut task is already holding lock:\n");
1388         print_lock(prev);
1389
1390         printk("\nother info that might help us debug this:\n");
1391         lockdep_print_held_locks(curr);
1392
1393         printk("\nstack backtrace:\n");
1394         dump_stack();
1395
1396         return 0;
1397 }
1398
1399 /*
1400  * Check whether we are holding such a class already.
1401  *
1402  * (Note that this has to be done separately, because the graph cannot
1403  * detect such classes of deadlocks.)
1404  *
1405  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1406  */
1407 static int
1408 check_deadlock(struct task_struct *curr, struct held_lock *next,
1409                struct lockdep_map *next_instance, int read)
1410 {
1411         struct held_lock *prev;
1412         struct held_lock *nest = NULL;
1413         int i;
1414
1415         for (i = 0; i < curr->lockdep_depth; i++) {
1416                 prev = curr->held_locks + i;
1417
1418                 if (prev->instance == next->nest_lock)
1419                         nest = prev;
1420
1421                 if (hlock_class(prev) != hlock_class(next))
1422                         continue;
1423
1424                 /*
1425                  * Allow read-after-read recursion of the same
1426                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1427                  */
1428                 if ((read == 2) && prev->read)
1429                         return 2;
1430
1431                 /*
1432                  * We're holding the nest_lock, which serializes this lock's
1433                  * nesting behaviour.
1434                  */
1435                 if (nest)
1436                         return 2;
1437
1438                 return print_deadlock_bug(curr, prev, next);
1439         }
1440         return 1;
1441 }
1442
1443 /*
1444  * There was a chain-cache miss, and we are about to add a new dependency
1445  * to a previous lock. We recursively validate the following rules:
1446  *
1447  *  - would the adding of the <prev> -> <next> dependency create a
1448  *    circular dependency in the graph? [== circular deadlock]
1449  *
1450  *  - does the new prev->next dependency connect any hardirq-safe lock
1451  *    (in the full backwards-subgraph starting at <prev>) with any
1452  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1453  *    <next>)? [== illegal lock inversion with hardirq contexts]
1454  *
1455  *  - does the new prev->next dependency connect any softirq-safe lock
1456  *    (in the full backwards-subgraph starting at <prev>) with any
1457  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1458  *    <next>)? [== illegal lock inversion with softirq contexts]
1459  *
1460  * Any of these scenarios could lead to a deadlock.
1461  *
1462  * Then if all the validations pass, we add the forwards and backwards
1463  * dependency.
1464  */
1465 static int
1466 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1467                struct held_lock *next, int distance)
1468 {
1469         struct lock_list *entry;
1470         int ret;
1471
1472         /*
1473          * Prove that the new <prev> -> <next> dependency would not
1474          * create a circular dependency in the graph. (We do this by
1475          * forward-recursing into the graph starting at <next>, and
1476          * checking whether we can reach <prev>.)
1477          *
1478          * We are using global variables to control the recursion, to
1479          * keep the stackframe size of the recursive functions low:
1480          */
1481         check_source = next;
1482         check_target = prev;
1483         if (!(check_noncircular(hlock_class(next), 0)))
1484                 return print_circular_bug_tail();
1485
1486         if (!check_prev_add_irq(curr, prev, next))
1487                 return 0;
1488
1489         /*
1490          * For recursive read-locks we do all the dependency checks,
1491          * but we don't store read-triggered dependencies (only
1492          * write-triggered dependencies). This ensures that only the
1493          * write-side dependencies matter, and that if for example a
1494          * write-lock never takes any other locks, then the reads are
1495          * equivalent to a NOP.
1496          */
1497         if (next->read == 2 || prev->read == 2)
1498                 return 1;
1499         /*
1500          * Is the <prev> -> <next> dependency already present?
1501          *
1502          * (this may occur even though this is a new chain: consider
1503          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1504          *  chains - the second one will be new, but L1 already has
1505          *  L2 added to its dependency list, due to the first chain.)
1506          */
1507         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1508                 if (entry->class == hlock_class(next)) {
1509                         if (distance == 1)
1510                                 entry->distance = 1;
1511                         return 2;
1512                 }
1513         }
1514
1515         /*
1516          * Ok, all validations passed, add the new lock
1517          * to the previous lock's dependency list:
1518          */
1519         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1520                                &hlock_class(prev)->locks_after,
1521                                next->acquire_ip, distance);
1522
1523         if (!ret)
1524                 return 0;
1525
1526         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1527                                &hlock_class(next)->locks_before,
1528                                next->acquire_ip, distance);
1529         if (!ret)
1530                 return 0;
1531
1532         /*
1533          * Debugging printouts:
1534          */
1535         if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1536                 graph_unlock();
1537                 printk("\n new dependency: ");
1538                 print_lock_name(hlock_class(prev));
1539                 printk(" => ");
1540                 print_lock_name(hlock_class(next));
1541                 printk("\n");
1542                 dump_stack();
1543                 return graph_lock();
1544         }
1545         return 1;
1546 }
1547
1548 /*
1549  * Add the dependency to all directly-previous locks that are 'relevant'.
1550  * The ones that are relevant are (in increasing distance from curr):
1551  * all consecutive trylock entries and the final non-trylock entry - or
1552  * the end of this context's lock-chain - whichever comes first.
1553  */
1554 static int
1555 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1556 {
1557         int depth = curr->lockdep_depth;
1558         struct held_lock *hlock;
1559
1560         /*
1561          * Debugging checks.
1562          *
1563          * Depth must not be zero for a non-head lock:
1564          */
1565         if (!depth)
1566                 goto out_bug;
1567         /*
1568          * At least two relevant locks must exist for this
1569          * to be a head:
1570          */
1571         if (curr->held_locks[depth].irq_context !=
1572                         curr->held_locks[depth-1].irq_context)
1573                 goto out_bug;
1574
1575         for (;;) {
1576                 int distance = curr->lockdep_depth - depth + 1;
1577                 hlock = curr->held_locks + depth-1;
1578                 /*
1579                  * Only non-recursive-read entries get new dependencies
1580                  * added:
1581                  */
1582                 if (hlock->read != 2) {
1583                         if (!check_prev_add(curr, hlock, next, distance))
1584                                 return 0;
1585                         /*
1586                          * Stop after the first non-trylock entry,
1587                          * as non-trylock entries have added their
1588                          * own direct dependencies already, so this
1589                          * lock is connected to them indirectly:
1590                          */
1591                         if (!hlock->trylock)
1592                                 break;
1593                 }
1594                 depth--;
1595                 /*
1596                  * End of lock-stack?
1597                  */
1598                 if (!depth)
1599                         break;
1600                 /*
1601                  * Stop the search if we cross into another context:
1602                  */
1603                 if (curr->held_locks[depth].irq_context !=
1604                                 curr->held_locks[depth-1].irq_context)
1605                         break;
1606         }
1607         return 1;
1608 out_bug:
1609         if (!debug_locks_off_graph_unlock())
1610                 return 0;
1611
1612         WARN_ON(1);
1613
1614         return 0;
1615 }
1616
1617 unsigned long nr_lock_chains;
1618 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1619 int nr_chain_hlocks;
1620 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1621
1622 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1623 {
1624         return lock_classes + chain_hlocks[chain->base + i];
1625 }
1626
1627 /*
1628  * Look up a dependency chain. If the key is not present yet then
1629  * add it and return 1 - in this case the new dependency chain is
1630  * validated. If the key is already hashed, return 0.
1631  * (On return with 1 graph_lock is held.)
1632  */
1633 static inline int lookup_chain_cache(struct task_struct *curr,
1634                                      struct held_lock *hlock,
1635                                      u64 chain_key)
1636 {
1637         struct lock_class *class = hlock_class(hlock);
1638         struct list_head *hash_head = chainhashentry(chain_key);
1639         struct lock_chain *chain;
1640         struct held_lock *hlock_curr, *hlock_next;
1641         int i, j, n, cn;
1642
1643         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1644                 return 0;
1645         /*
1646          * We can walk it lock-free, because entries only get added
1647          * to the hash:
1648          */
1649         list_for_each_entry(chain, hash_head, entry) {
1650                 if (chain->chain_key == chain_key) {
1651 cache_hit:
1652                         debug_atomic_inc(&chain_lookup_hits);
1653                         if (very_verbose(class))
1654                                 printk("\nhash chain already cached, key: "
1655                                         "%016Lx tail class: [%p] %s\n",
1656                                         (unsigned long long)chain_key,
1657                                         class->key, class->name);
1658                         return 0;
1659                 }
1660         }
1661         if (very_verbose(class))
1662                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1663                         (unsigned long long)chain_key, class->key, class->name);
1664         /*
1665          * Allocate a new chain entry from the static array, and add
1666          * it to the hash:
1667          */
1668         if (!graph_lock())
1669                 return 0;
1670         /*
1671          * We have to walk the chain again locked - to avoid duplicates:
1672          */
1673         list_for_each_entry(chain, hash_head, entry) {
1674                 if (chain->chain_key == chain_key) {
1675                         graph_unlock();
1676                         goto cache_hit;
1677                 }
1678         }
1679         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1680                 if (!debug_locks_off_graph_unlock())
1681                         return 0;
1682
1683                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1684                 printk("turning off the locking correctness validator.\n");
1685                 return 0;
1686         }
1687         chain = lock_chains + nr_lock_chains++;
1688         chain->chain_key = chain_key;
1689         chain->irq_context = hlock->irq_context;
1690         /* Find the first held_lock of current chain */
1691         hlock_next = hlock;
1692         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1693                 hlock_curr = curr->held_locks + i;
1694                 if (hlock_curr->irq_context != hlock_next->irq_context)
1695                         break;
1696                 hlock_next = hlock;
1697         }
1698         i++;
1699         chain->depth = curr->lockdep_depth + 1 - i;
1700         cn = nr_chain_hlocks;
1701         while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1702                 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1703                 if (n == cn)
1704                         break;
1705                 cn = n;
1706         }
1707         if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1708                 chain->base = cn;
1709                 for (j = 0; j < chain->depth - 1; j++, i++) {
1710                         int lock_id = curr->held_locks[i].class_idx - 1;
1711                         chain_hlocks[chain->base + j] = lock_id;
1712                 }
1713                 chain_hlocks[chain->base + j] = class - lock_classes;
1714         }
1715         list_add_tail_rcu(&chain->entry, hash_head);
1716         debug_atomic_inc(&chain_lookup_misses);
1717         inc_chains();
1718
1719         return 1;
1720 }
1721
1722 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1723                 struct held_lock *hlock, int chain_head, u64 chain_key)
1724 {
1725         /*
1726          * Trylock needs to maintain the stack of held locks, but it
1727          * does not add new dependencies, because trylock can be done
1728          * in any order.
1729          *
1730          * We look up the chain_key and do the O(N^2) check and update of
1731          * the dependencies only if this is a new dependency chain.
1732          * (If lookup_chain_cache() returns with 1 it acquires
1733          * graph_lock for us)
1734          */
1735         if (!hlock->trylock && (hlock->check == 2) &&
1736             lookup_chain_cache(curr, hlock, chain_key)) {
1737                 /*
1738                  * Check whether last held lock:
1739                  *
1740                  * - is irq-safe, if this lock is irq-unsafe
1741                  * - is softirq-safe, if this lock is hardirq-unsafe
1742                  *
1743                  * And check whether the new lock's dependency graph
1744                  * could lead back to the previous lock.
1745                  *
1746          * Any of these scenarios could lead to a deadlock. If
1747          * all validations pass, we then add the new dependency:
1748                  */
1749                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1750
1751                 if (!ret)
1752                         return 0;
1753                 /*
1754                  * Mark recursive read, as we jump over it when
1755                  * building dependencies (just like we jump over
1756                  * trylock entries):
1757                  */
1758                 if (ret == 2)
1759                         hlock->read = 2;
1760                 /*
1761                  * Add dependency only if this lock is not the head
1762                  * of the chain, and if it's not a secondary read-lock:
1763                  */
1764                 if (!chain_head && ret != 2)
1765                         if (!check_prevs_add(curr, hlock))
1766                                 return 0;
1767                 graph_unlock();
1768         } else
1769                 /* after lookup_chain_cache(): */
1770                 if (unlikely(!debug_locks))
1771                         return 0;
1772
1773         return 1;
1774 }
1775 #else
1776 static inline int validate_chain(struct task_struct *curr,
1777                 struct lockdep_map *lock, struct held_lock *hlock,
1778                 int chain_head, u64 chain_key)
1779 {
1780         return 1;
1781 }
1782 #endif
1783
1784 /*
1785  * We are building curr_chain_key incrementally, so double-check
1786  * it from scratch, to make sure that it's done correctly:
1787  */
1788 static void check_chain_key(struct task_struct *curr)
1789 {
1790 #ifdef CONFIG_DEBUG_LOCKDEP
1791         struct held_lock *hlock, *prev_hlock = NULL;
1792         unsigned int i, id;
1793         u64 chain_key = 0;
1794
1795         for (i = 0; i < curr->lockdep_depth; i++) {
1796                 hlock = curr->held_locks + i;
1797                 if (chain_key != hlock->prev_chain_key) {
1798                         debug_locks_off();
1799                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1800                                 curr->lockdep_depth, i,
1801                                 (unsigned long long)chain_key,
1802                                 (unsigned long long)hlock->prev_chain_key);
1803                         return;
1804                 }
1805                 id = hlock->class_idx - 1;
1806                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1807                         return;
1808
1809                 if (prev_hlock && (prev_hlock->irq_context !=
1810                                                         hlock->irq_context))
1811                         chain_key = 0;
1812                 chain_key = iterate_chain_key(chain_key, id);
1813                 prev_hlock = hlock;
1814         }
1815         if (chain_key != curr->curr_chain_key) {
1816                 debug_locks_off();
1817                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1818                         curr->lockdep_depth, i,
1819                         (unsigned long long)chain_key,
1820                         (unsigned long long)curr->curr_chain_key);
1821         }
1822 #endif
1823 }
1824
1825 static int
1826 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1827                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1828 {
1829         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1830                 return 0;
1831
1832         printk("\n=================================\n");
1833         printk(  "[ INFO: inconsistent lock state ]\n");
1834         print_kernel_version();
1835         printk(  "---------------------------------\n");
1836
1837         printk("inconsistent {%s} -> {%s} usage.\n",
1838                 usage_str[prev_bit], usage_str[new_bit]);
1839
1840         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1841                 curr->comm, task_pid_nr(curr),
1842                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1843                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1844                 trace_hardirqs_enabled(curr),
1845                 trace_softirqs_enabled(curr));
1846         print_lock(this);
1847
1848         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1849         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1850
1851         print_irqtrace_events(curr);
1852         printk("\nother info that might help us debug this:\n");
1853         lockdep_print_held_locks(curr);
1854
1855         printk("\nstack backtrace:\n");
1856         dump_stack();
1857
1858         return 0;
1859 }
1860
1861 /*
1862  * Print out an error if an invalid bit is set:
1863  */
1864 static inline int
1865 valid_state(struct task_struct *curr, struct held_lock *this,
1866             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1867 {
1868         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1869                 return print_usage_bug(curr, this, bad_bit, new_bit);
1870         return 1;
1871 }
1872
1873 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1874                      enum lock_usage_bit new_bit);
1875
1876 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1877
1878 /*
1879  * print irq inversion bug:
1880  */
1881 static int
1882 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1883                         struct held_lock *this, int forwards,
1884                         const char *irqclass)
1885 {
1886         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1887                 return 0;
1888
1889         printk("\n=========================================================\n");
1890         printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1891         print_kernel_version();
1892         printk(  "---------------------------------------------------------\n");
1893         printk("%s/%d just changed the state of lock:\n",
1894                 curr->comm, task_pid_nr(curr));
1895         print_lock(this);
1896         if (forwards)
1897                 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1898         else
1899                 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1900         print_lock_name(other);
1901         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1902
1903         printk("\nother info that might help us debug this:\n");
1904         lockdep_print_held_locks(curr);
1905
1906         printk("\nthe first lock's dependencies:\n");
1907         print_lock_dependencies(hlock_class(this), 0);
1908
1909         printk("\nthe second lock's dependencies:\n");
1910         print_lock_dependencies(other, 0);
1911
1912         printk("\nstack backtrace:\n");
1913         dump_stack();
1914
1915         return 0;
1916 }
1917
1918 /*
1919  * Prove that in the forwards-direction subgraph starting at <this>
1920  * there is no lock matching <mask>:
1921  */
1922 static int
1923 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1924                      enum lock_usage_bit bit, const char *irqclass)
1925 {
1926         int ret;
1927
1928         find_usage_bit = bit;
1929         /* fills in <forwards_match> */
1930         ret = find_usage_forwards(hlock_class(this), 0);
1931         if (!ret || ret == 1)
1932                 return ret;
1933
1934         return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1935 }
1936
1937 /*
1938  * Prove that in the backwards-direction subgraph starting at <this>
1939  * there is no lock matching <mask>:
1940  */
1941 static int
1942 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1943                       enum lock_usage_bit bit, const char *irqclass)
1944 {
1945         int ret;
1946
1947         find_usage_bit = bit;
1948         /* fills in <backwards_match> */
1949         ret = find_usage_backwards(hlock_class(this), 0);
1950         if (!ret || ret == 1)
1951                 return ret;
1952
1953         return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1954 }
1955
1956 void print_irqtrace_events(struct task_struct *curr)
1957 {
1958         printk("irq event stamp: %u\n", curr->irq_events);
1959         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1960         print_ip_sym(curr->hardirq_enable_ip);
1961         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1962         print_ip_sym(curr->hardirq_disable_ip);
1963         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1964         print_ip_sym(curr->softirq_enable_ip);
1965         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1966         print_ip_sym(curr->softirq_disable_ip);
1967 }
1968
1969 static int HARDIRQ_verbose(struct lock_class *class)
1970 {
1971 #if HARDIRQ_VERBOSE
1972         return class_filter(class);
1973 #endif
1974         return 0;
1975 }
1976
1977 static int SOFTIRQ_verbose(struct lock_class *class)
1978 {
1979 #if SOFTIRQ_VERBOSE
1980         return class_filter(class);
1981 #endif
1982         return 0;
1983 }
1984
1985 static int RECLAIM_FS_verbose(struct lock_class *class)
1986 {
1987 #if RECLAIM_VERBOSE
1988         return class_filter(class);
1989 #endif
1990         return 0;
1991 }
1992
1993 #define STRICT_READ_CHECKS      1
1994
1995 static int (*state_verbose_f[])(struct lock_class *class) = {
1996 #define LOCKDEP_STATE(__STATE) \
1997         __STATE##_verbose,
1998 #include "lockdep_states.h"
1999 #undef LOCKDEP_STATE
2000 };
2001
2002 static inline int state_verbose(enum lock_usage_bit bit,
2003                                 struct lock_class *class)
2004 {
2005         return state_verbose_f[bit >> 2](class);
2006 }
2007
2008 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2009                              enum lock_usage_bit bit, const char *name);
2010
2011 static int
2012 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2013                 enum lock_usage_bit new_bit)
2014 {
2015         int excl_bit = exclusive_bit(new_bit);
2016         int read = new_bit & 1;
2017         int dir = new_bit & 2;
2018
2019         /*
2020          * mark USED_IN has to look forwards -- to ensure no dependency
2021          * has ENABLED state, which would allow recursion deadlocks.
2022          *
2023          * mark ENABLED has to look backwards -- to ensure no dependee
2024          * has USED_IN state, which, again, would allow recursion deadlocks.
2025          */
2026         check_usage_f usage = dir ?
2027                 check_usage_backwards : check_usage_forwards;
2028
2029         /*
2030          * Validate that this particular lock does not have conflicting
2031          * usage states.
2032          */
2033         if (!valid_state(curr, this, new_bit, excl_bit))
2034                 return 0;
2035
2036         /*
2037          * Validate that the lock dependencies don't have conflicting usage
2038          * states.
2039          */
2040         if ((!read || !dir || STRICT_READ_CHECKS) &&
2041                         !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2042                 return 0;
2043
2044         /*
2045          * Check for read in write conflicts
2046          */
2047         if (!read) {
2048                 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2049                         return 0;
2050
2051                 if (STRICT_READ_CHECKS &&
2052                         !usage(curr, this, excl_bit + 1,
2053                                 state_name(new_bit + 1)))
2054                         return 0;
2055         }
2056
2057         if (state_verbose(new_bit, hlock_class(this)))
2058                 return 2;
2059
2060         return 1;
2061 }
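/*
 * Illustrative note, derived from the code above rather than taken from
 * the original source: each state in lockdep_states.h owns a group of
 * four usage bits, with the low bits encoding the variant:
 *
 *	bit 0 (read):	0 = write usage,      1 = *_READ usage
 *	bit 1 (dir):	0 = LOCK_USED_IN_*,   1 = LOCK_ENABLED_*
 *	bits 2..:	index of the state (HARDIRQ, SOFTIRQ, ...)
 *
 * This is why mark_lock_irq() can extract 'read' and 'dir' with simple
 * masks, and why state_verbose() indexes state_verbose_f[] with 'bit >> 2'.
 */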
2062
2063 enum mark_type {
2064 #define LOCKDEP_STATE(__STATE)  __STATE,
2065 #include "lockdep_states.h"
2066 #undef LOCKDEP_STATE
2067 };
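/*
 * For reference - an assumption based on the per-state helpers above, not
 * a quote of lockdep_states.h: that header is expected to expand
 * LOCKDEP_STATE() once per tracked state, roughly
 *
 *	LOCKDEP_STATE(HARDIRQ)
 *	LOCKDEP_STATE(SOFTIRQ)
 *	LOCKDEP_STATE(RECLAIM_FS)
 *
 * so the enum above becomes { HARDIRQ, SOFTIRQ, RECLAIM_FS } and the same
 * include trick generates the matching usage bits and verbose hooks.
 */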
2068
2069 /*
2070  * Mark all held locks with a usage bit:
2071  */
2072 static int
2073 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2074 {
2075         enum lock_usage_bit usage_bit;
2076         struct held_lock *hlock;
2077         int i;
2078
2079         for (i = 0; i < curr->lockdep_depth; i++) {
2080                 hlock = curr->held_locks + i;
2081
2082                 usage_bit = 2 + (mark << 2); /* ENABLED */
2083                 if (hlock->read)
2084                         usage_bit += 1; /* READ */
2085
2086                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2087
2088                 if (!mark_lock(curr, hlock, usage_bit))
2089                         return 0;
2090         }
2091
2092         return 1;
2093 }
2094
2095 /*
2096  * Debugging helper: via this flag we know that we are in
2097  * 'early bootup code', and will warn about any invalid irqs-on event:
2098  */
2099 static int early_boot_irqs_enabled;
2100
2101 void early_boot_irqs_off(void)
2102 {
2103         early_boot_irqs_enabled = 0;
2104 }
2105
2106 void early_boot_irqs_on(void)
2107 {
2108         early_boot_irqs_enabled = 1;
2109 }
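/*
 * Hedged usage sketch - the callers live outside this file: startup code
 * is expected to bracket the early-boot window roughly like
 *
 *	early_boot_irqs_off();
 *	... early setup that must run with hardirqs disabled ...
 *	early_boot_irqs_on();
 *	local_irq_enable();
 *
 * so that trace_hardirqs_on_caller() below can warn about any irqs-on
 * event that happens before the early-boot window is declared over.
 */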
2110
2111 /*
2112  * Hardirqs will be enabled:
2113  */
2114 void trace_hardirqs_on_caller(unsigned long ip)
2115 {
2116         struct task_struct *curr = current;
2117
2118         time_hardirqs_on(CALLER_ADDR0, ip);
2119
2120         if (unlikely(!debug_locks || current->lockdep_recursion))
2121                 return;
2122
2123         if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2124                 return;
2125
2126         if (unlikely(curr->hardirqs_enabled)) {
2127                 debug_atomic_inc(&redundant_hardirqs_on);
2128                 return;
2129         }
2130         /* we'll do an OFF -> ON transition: */
2131         curr->hardirqs_enabled = 1;
2132
2133         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2134                 return;
2135         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2136                 return;
2137         /*
2138          * We are going to turn hardirqs on, so set the
2139          * usage bit for all held locks:
2140          */
2141         if (!mark_held_locks(curr, HARDIRQ))
2142                 return;
2143         /*
2144          * If we have softirqs enabled, then set the usage
2145          * bit for all held locks. (disabled hardirqs prevented
2146          * this bit from being set before)
2147          */
2148         if (curr->softirqs_enabled)
2149                 if (!mark_held_locks(curr, SOFTIRQ))
2150                         return;
2151
2152         curr->hardirq_enable_ip = ip;
2153         curr->hardirq_enable_event = ++curr->irq_events;
2154         debug_atomic_inc(&hardirqs_on_events);
2155 }
2156 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2157
2158 void trace_hardirqs_on(void)
2159 {
2160         trace_hardirqs_on_caller(CALLER_ADDR0);
2161 }
2162 EXPORT_SYMBOL(trace_hardirqs_on);
2163
2164 /*
2165  * Hardirqs were disabled:
2166  */
2167 void trace_hardirqs_off_caller(unsigned long ip)
2168 {
2169         struct task_struct *curr = current;
2170
2171         time_hardirqs_off(CALLER_ADDR0, ip);
2172
2173         if (unlikely(!debug_locks || current->lockdep_recursion))
2174                 return;
2175
2176         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2177                 return;
2178
2179         if (curr->hardirqs_enabled) {
2180                 /*
2181                  * We have done an ON -> OFF transition:
2182                  */
2183                 curr->hardirqs_enabled = 0;
2184                 curr->hardirq_disable_ip = ip;
2185                 curr->hardirq_disable_event = ++curr->irq_events;
2186                 debug_atomic_inc(&hardirqs_off_events);
2187         } else
2188                 debug_atomic_inc(&redundant_hardirqs_off);
2189 }
2190 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2191
2192 void trace_hardirqs_off(void)
2193 {
2194         trace_hardirqs_off_caller(CALLER_ADDR0);
2195 }
2196 EXPORT_SYMBOL(trace_hardirqs_off);
2197
2198 /*
2199  * Softirqs will be enabled:
2200  */
2201 void trace_softirqs_on(unsigned long ip)
2202 {
2203         struct task_struct *curr = current;
2204
2205         if (unlikely(!debug_locks))
2206                 return;
2207
2208         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2209                 return;
2210
2211         if (curr->softirqs_enabled) {
2212                 debug_atomic_inc(&redundant_softirqs_on);
2213                 return;
2214         }
2215
2216         /*
2217          * We'll do an OFF -> ON transition:
2218          */
2219         curr->softirqs_enabled = 1;
2220         curr->softirq_enable_ip = ip;
2221         curr->softirq_enable_event = ++curr->irq_events;
2222         debug_atomic_inc(&softirqs_on_events);
2223         /*
2224          * We are going to turn softirqs on, so set the
2225          * usage bit for all held locks, if hardirqs are
2226          * enabled too:
2227          */
2228         if (curr->hardirqs_enabled)
2229                 mark_held_locks(curr, SOFTIRQ);
2230 }
2231
2232 /*
2233  * Softirqs were disabled:
2234  */
2235 void trace_softirqs_off(unsigned long ip)
2236 {
2237         struct task_struct *curr = current;
2238
2239         if (unlikely(!debug_locks))
2240                 return;
2241
2242         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2243                 return;
2244
2245         if (curr->softirqs_enabled) {
2246                 /*
2247                  * We have done an ON -> OFF transition:
2248                  */
2249                 curr->softirqs_enabled = 0;
2250                 curr->softirq_disable_ip = ip;
2251                 curr->softirq_disable_event = ++curr->irq_events;
2252                 debug_atomic_inc(&softirqs_off_events);
2253                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2254         } else
2255                 debug_atomic_inc(&redundant_softirqs_off);
2256 }
2257
2258 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2259 {
2260         struct task_struct *curr = current;
2261
2262         if (unlikely(!debug_locks))
2263                 return;
2264
2265         /* no reclaim without waiting on it */
2266         if (!(gfp_mask & __GFP_WAIT))
2267                 return;
2268
2269         /* this guy won't enter reclaim */
2270         if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2271                 return;
2272
2273         /* We're only interested in __GFP_FS allocations for now */
2274         if (!(gfp_mask & __GFP_FS))
2275                 return;
2276
2277         if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2278                 return;
2279
2280         mark_held_locks(curr, RECLAIM_FS);
2281 }
2282
2283 static void check_flags(unsigned long flags);
2284
2285 void lockdep_trace_alloc(gfp_t gfp_mask)
2286 {
2287         unsigned long flags;
2288
2289         if (unlikely(current->lockdep_recursion))
2290                 return;
2291
2292         raw_local_irq_save(flags);
2293         check_flags(flags);
2294         current->lockdep_recursion = 1;
2295         __lockdep_trace_alloc(gfp_mask, flags);
2296         current->lockdep_recursion = 0;
2297         raw_local_irq_restore(flags);
2298 }
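/*
 * Hedged note: the allocator entry points (outside this file) are expected
 * to call lockdep_trace_alloc(gfp_mask) on each allocation.  Per
 * __lockdep_trace_alloc() above, a __GFP_WAIT | __GFP_FS allocation then
 * marks every currently held lock with RECLAIM_FS usage - e.g. doing a
 * GFP_KERNEL allocation while holding a lock records that this lock is
 * held over filesystem reclaim.
 */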
2299
2300 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2301 {
2302         /*
2303          * If this is a non-trylock acquisition in a hardirq or softirq
2304          * context, then mark the lock as used in those contexts:
2305          */
2306         if (!hlock->trylock) {
2307                 if (hlock->read) {
2308                         if (curr->hardirq_context)
2309                                 if (!mark_lock(curr, hlock,
2310                                                 LOCK_USED_IN_HARDIRQ_READ))
2311                                         return 0;
2312                         if (curr->softirq_context)
2313                                 if (!mark_lock(curr, hlock,
2314                                                 LOCK_USED_IN_SOFTIRQ_READ))
2315                                         return 0;
2316                 } else {
2317                         if (curr->hardirq_context)
2318                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2319                                         return 0;
2320                         if (curr->softirq_context)
2321                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2322                                         return 0;
2323                 }
2324         }
2325         if (!hlock->hardirqs_off) {
2326                 if (hlock->read) {
2327                         if (!mark_lock(curr, hlock,
2328                                         LOCK_ENABLED_HARDIRQ_READ))
2329                                 return 0;
2330                         if (curr->softirqs_enabled)
2331                                 if (!mark_lock(curr, hlock,
2332                                                 LOCK_ENABLED_SOFTIRQ_READ))
2333                                         return 0;
2334                 } else {
2335                         if (!mark_lock(curr, hlock,
2336                                         LOCK_ENABLED_HARDIRQ))
2337                                 return 0;
2338                         if (curr->softirqs_enabled)
2339                                 if (!mark_lock(curr, hlock,
2340                                                 LOCK_ENABLED_SOFTIRQ))
2341                                         return 0;
2342                 }
2343         }
2344
2345         /*
2346          * We reuse the irq context infrastructure more broadly, as general
2347          * context-checking code. This tests for GFP_FS recursion (a lock taken
2348          * during reclaim for a GFP_FS allocation is held over a GFP_FS
2349          * allocation).
2350          */
2351         if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2352                 if (hlock->read) {
2353                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2354                                         return 0;
2355                 } else {
2356                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2357                                         return 0;
2358                 }
2359         }
2360
2361         return 1;
2362 }
2363
2364 static int separate_irq_context(struct task_struct *curr,
2365                 struct held_lock *hlock)
2366 {
2367         unsigned int depth = curr->lockdep_depth;
2368
2369         /*
2370          * Keep track of points where we cross into an interrupt context:
2371          */
2372         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2373                                 curr->softirq_context;
2374         if (depth) {
2375                 struct held_lock *prev_hlock;
2376
2377                 prev_hlock = curr->held_locks + depth-1;
2378                 /*
2379                  * If we cross into another context, reset the
2380                  * hash key (this also prevents the checking and the
2381                  * adding of the dependency to 'prev'):
2382                  */
2383                 if (prev_hlock->irq_context != hlock->irq_context)
2384                         return 1;
2385         }
2386         return 0;
2387 }
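/*
 * Illustrative note, derived from the encoding above: irq_context comes
 * out as roughly 0 in process context, 1 in softirq context and 2 in
 * hardirq context.  A change in this value marks the point where the lock
 * stack crosses into a different context, which makes __lock_acquire()
 * restart the chain key for the new context.
 */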
2388
2389 #else
2390
2391 static inline
2392 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2393                 enum lock_usage_bit new_bit)
2394 {
2395         WARN_ON(1);
2396         return 1;
2397 }
2398
2399 static inline int mark_irqflags(struct task_struct *curr,
2400                 struct held_lock *hlock)
2401 {
2402         return 1;
2403 }
2404
2405 static inline int separate_irq_context(struct task_struct *curr,
2406                 struct held_lock *hlock)
2407 {
2408         return 0;
2409 }
2410
2411 void lockdep_trace_alloc(gfp_t gfp_mask)
2412 {
2413 }
2414
2415 #endif
2416
2417 /*
2418  * Mark a lock with a usage bit, and validate the state transition:
2419  */
2420 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2421                              enum lock_usage_bit new_bit)
2422 {
2423         unsigned int new_mask = 1 << new_bit, ret = 1;
2424
2425         /*
2426          * If already set then do not dirty the cacheline,
2427          * nor do any checks:
2428          */
2429         if (likely(hlock_class(this)->usage_mask & new_mask))
2430                 return 1;
2431
2432         if (!graph_lock())
2433                 return 0;
2434         /*
2435          * Make sure we didn't race:
2436          */
2437         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2438                 graph_unlock();
2439                 return 1;
2440         }
2441
2442         hlock_class(this)->usage_mask |= new_mask;
2443
2444         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2445                 return 0;
2446
2447         switch (new_bit) {
2448 #define LOCKDEP_STATE(__STATE)                  \
2449         case LOCK_USED_IN_##__STATE:            \
2450         case LOCK_USED_IN_##__STATE##_READ:     \
2451         case LOCK_ENABLED_##__STATE:            \
2452         case LOCK_ENABLED_##__STATE##_READ:
2453 #include "lockdep_states.h"
2454 #undef LOCKDEP_STATE
2455                 ret = mark_lock_irq(curr, this, new_bit);
2456                 if (!ret)
2457                         return 0;
2458                 break;
2459         case LOCK_USED:
2460                 debug_atomic_dec(&nr_unused_locks);
2461                 break;
2462         default:
2463                 if (!debug_locks_off_graph_unlock())
2464                         return 0;
2465                 WARN_ON(1);
2466                 return 0;
2467         }
2468
2469         graph_unlock();
2470
2471         /*
2472          * We must printk outside of the graph_lock:
2473          */
2474         if (ret == 2) {
2475                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2476                 print_lock(this);
2477                 print_irqtrace_events(curr);
2478                 dump_stack();
2479         }
2480
2481         return ret;
2482 }
2483
2484 /*
2485  * Initialize a lock instance's lock-class mapping info:
2486  */
2487 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2488                       struct lock_class_key *key, int subclass)
2489 {
2490         if (unlikely(!debug_locks))
2491                 return;
2492
2493         if (DEBUG_LOCKS_WARN_ON(!key))
2494                 return;
2495         if (DEBUG_LOCKS_WARN_ON(!name))
2496                 return;
2497         /*
2498          * Sanity check, the lock-class key must be persistent:
2499          */
2500         if (!static_obj(key)) {
2501                 printk("BUG: key %p not in .data!\n", key);
2502                 DEBUG_LOCKS_WARN_ON(1);
2503                 return;
2504         }
2505         lock->name = name;
2506         lock->key = key;
2507         lock->class_cache = NULL;
2508 #ifdef CONFIG_LOCK_STAT
2509         lock->cpu = raw_smp_processor_id();
2510 #endif
2511         if (subclass)
2512                 register_lock_class(lock, subclass, 1);
2513 }
2514 EXPORT_SYMBOL_GPL(lockdep_init_map);
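/*
 * Hedged usage sketch - the names __my_init/my_lock_init are made up for
 * illustration and are not part of this file.  Lock initializers are
 * expected to pass in a key with static storage duration, typically one
 * static key per initialization site:
 *
 *	void __my_init(struct my_lock *lock, const char *name,
 *		       struct lock_class_key *key)
 *	{
 *		lockdep_init_map(&lock->dep_map, name, key, 0);
 *	}
 *
 *	#define my_lock_init(lock)			\
 *	do {						\
 *		static struct lock_class_key __key;	\
 *							\
 *		__my_init((lock), #lock, &__key);	\
 *	} while (0)
 *
 * Every init site thus gets its own lock class, while all instances
 * initialized at the same site share one class.
 */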
2515
2516 /*
2517  * This gets called for every mutex_lock*()/spin_lock*() operation.
2518  * We maintain the dependency maps and validate the locking attempt:
2519  */
2520 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2521                           int trylock, int read, int check, int hardirqs_off,
2522                           struct lockdep_map *nest_lock, unsigned long ip)
2523 {
2524         struct task_struct *curr = current;
2525         struct lock_class *class = NULL;
2526         struct held_lock *hlock;
2527         unsigned int depth, id;
2528         int chain_head = 0;
2529         u64 chain_key;
2530
2531         if (!prove_locking)
2532                 check = 1;
2533
2534         if (unlikely(!debug_locks))
2535                 return 0;
2536
2537         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2538                 return 0;
2539
2540         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2541                 debug_locks_off();
2542                 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2543                 printk("turning off the locking correctness validator.\n");
2544                 return 0;
2545         }
2546
2547         if (!subclass)
2548                 class = lock->class_cache;
2549         /*
2550          * Not cached yet or subclass?
2551          */
2552         if (unlikely(!class)) {
2553                 class = register_lock_class(lock, subclass, 0);
2554                 if (!class)
2555                         return 0;
2556         }
2557         debug_atomic_inc((atomic_t *)&class->ops);
2558         if (very_verbose(class)) {
2559                 printk("\nacquire class [%p] %s", class->key, class->name);
2560                 if (class->name_version > 1)
2561                         printk("#%d", class->name_version);
2562                 printk("\n");
2563                 dump_stack();
2564         }
2565
2566         /*
2567          * Add the lock to the list of currently held locks.
2568          * (we don't increase the depth just yet, not until the
2569          * dependency checks are done)
2570          */
2571         depth = curr->lockdep_depth;
2572         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2573                 return 0;
2574
2575         hlock = curr->held_locks + depth;
2576         if (DEBUG_LOCKS_WARN_ON(!class))
2577                 return 0;
2578         hlock->class_idx = class - lock_classes + 1;
2579         hlock->acquire_ip = ip;
2580         hlock->instance = lock;
2581         hlock->nest_lock = nest_lock;
2582         hlock->trylock = trylock;
2583         hlock->read = read;
2584         hlock->check = check;
2585         hlock->hardirqs_off = !!hardirqs_off;
2586 #ifdef CONFIG_LOCK_STAT
2587         hlock->waittime_stamp = 0;
2588         hlock->holdtime_stamp = sched_clock();
2589 #endif
2590
2591         if (check == 2 && !mark_irqflags(curr, hlock))
2592                 return 0;
2593
2594         /* mark it as used: */
2595         if (!mark_lock(curr, hlock, LOCK_USED))
2596                 return 0;
2597
2598         /*
2599          * Calculate the chain hash: it's the combined hash of all the
2600          * lock keys along the dependency chain. We save the hash value
2601          * at every step so that we can get the current hash easily
2602          * after unlock. The chain hash is then used to cache dependency
2603          * results.
2604          *
2605          * The 'key ID' is the most compact key value we can use to
2606          * drive the hash, rather than class->key.
2607          */
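        /*
         * Worked example (illustrative, not from the original comment):
         * taking classes A, B, C in order, with key IDs idA, idB, idC:
         *
         *	chain_key starts out as 0 (chain head)
         *	after A:  key1 = iterate_chain_key(0,    idA)
         *	after B:  key2 = iterate_chain_key(key1, idB)
         *	after C:  key3 = iterate_chain_key(key2, idC)
         *
         * Each held_lock saves the value from before its own class was
         * mixed in (prev_chain_key), so unlocking simply restores it.
         */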
2608         id = class - lock_classes;
2609         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2610                 return 0;
2611
2612         chain_key = curr->curr_chain_key;
2613         if (!depth) {
2614                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2615                         return 0;
2616                 chain_head = 1;
2617         }
2618
2619         hlock->prev_chain_key = chain_key;
2620         if (separate_irq_context(curr, hlock)) {
2621                 chain_key = 0;
2622                 chain_head = 1;
2623         }
2624         chain_key = iterate_chain_key(chain_key, id);
2625
2626         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2627                 return 0;
2628
2629         curr->curr_chain_key = chain_key;
2630         curr->lockdep_depth++;
2631         check_chain_key(curr);
2632 #ifdef CONFIG_DEBUG_LOCKDEP
2633         if (unlikely(!debug_locks))
2634                 return 0;
2635 #endif
2636         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2637                 debug_locks_off();
2638                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2639                 printk("turning off the locking correctness validator.\n");
2640                 return 0;
2641         }
2642
2643         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2644                 max_lockdep_depth = curr->lockdep_depth;
2645
2646         return 1;
2647 }
2648
2649 static int
2650 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2651                            unsigned long ip)
2652 {
2653         if (!debug_locks_off())
2654                 return 0;
2655         if (debug_locks_silent)
2656                 return 0;
2657
2658         printk("\n=====================================\n");
2659         printk(  "[ BUG: bad unlock balance detected! ]\n");
2660         printk(  "-------------------------------------\n");
2661         printk("%s/%d is trying to release lock (",
2662                 curr->comm, task_pid_nr(curr));
2663         print_lockdep_cache(lock);
2664         printk(") at:\n");
2665         print_ip_sym(ip);
2666         printk("but there are no more locks to release!\n");
2667         printk("\nother info that might help us debug this:\n");
2668         lockdep_print_held_locks(curr);
2669
2670         printk("\nstack backtrace:\n");
2671         dump_stack();
2672
2673         return 0;
2674 }
2675
2676 /*
2677  * Common debugging checks for both nested and non-nested unlock:
2678  */
2679 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2680                         unsigned long ip)
2681 {
2682         if (unlikely(!debug_locks))
2683                 return 0;
2684         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2685                 return 0;
2686
2687         if (curr->lockdep_depth <= 0)
2688                 return print_unlock_inbalance_bug(curr, lock, ip);
2689
2690         return 1;
2691 }
2692
2693 static int
2694 __lock_set_class(struct lockdep_map *lock, const char *name,
2695                  struct lock_class_key *key, unsigned int subclass,
2696                  unsigned long ip)
2697 {
2698         struct task_struct *curr = current;
2699         struct held_lock *hlock, *prev_hlock;
2700         struct lock_class *class;
2701         unsigned int depth;
2702         int i;
2703
2704         depth = curr->lockdep_depth;
2705         if (DEBUG_LOCKS_WARN_ON(!depth))
2706                 return 0;
2707
2708         prev_hlock = NULL;
2709         for (i = depth-1; i >= 0; i--) {
2710                 hlock = curr->held_locks + i;
2711                 /*
2712                  * We must not cross into another context:
2713                  */
2714                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2715                         break;
2716                 if (hlock->instance == lock)
2717                         goto found_it;
2718                 prev_hlock = hlock;
2719         }
2720         return print_unlock_inbalance_bug(curr, lock, ip);
2721
2722 found_it:
2723         lockdep_init_map(lock, name, key, 0);
2724         class = register_lock_class(lock, subclass, 0);
2725         hlock->class_idx = class - lock_classes + 1;
2726
2727         curr->lockdep_depth = i;
2728         curr->curr_chain_key = hlock->prev_chain_key;
2729
2730         for (; i < depth; i++) {
2731                 hlock = curr->held_locks + i;
2732                 if (!__lock_acquire(hlock->instance,
2733                         hlock_class(hlock)->subclass, hlock->trylock,
2734                                 hlock->read, hlock->check, hlock->hardirqs_off,
2735                                 hlock->nest_lock, hlock->acquire_ip))
2736                         return 0;
2737         }
2738
2739         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2740                 return 0;
2741         return 1;
2742 }
2743
2744 /*
2745  * Remove the lock from the list of currently held locks in a
2746  * potentially non-nested (out of order) manner. This is a
2747  * relatively rare operation, as all the unlock APIs default
2748  * to nested mode (which uses lock_release()):
2749  */
2750 static int
2751 lock_release_non_nested(struct task_struct *curr,
2752                         struct lockdep_map *lock, unsigned long ip)
2753 {
2754         struct held_lock *hlock, *prev_hlock;
2755         unsigned int depth;
2756         int i;
2757
2758         /*
2759          * Check whether the lock exists in the current stack
2760          * of held locks:
2761          */
2762         depth = curr->lockdep_depth;
2763         if (DEBUG_LOCKS_WARN_ON(!depth))
2764                 return 0;
2765
2766         prev_hlock = NULL;
2767         for (i = depth-1; i >= 0; i--) {
2768                 hlock = curr->held_locks + i;
2769                 /*
2770                  * We must not cross into another context:
2771                  */
2772                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2773                         break;
2774                 if (hlock->instance == lock)
2775                         goto found_it;
2776                 prev_hlock = hlock;
2777         }
2778         return print_unlock_inbalance_bug(curr, lock, ip);
2779
2780 found_it:
2781         lock_release_holdtime(hlock);
2782
2783         /*
2784          * We have the right lock to unlock, 'hlock' points to it.
2785          * Now we remove it from the stack, and add back the other
2786          * entries (if any), recalculating the hash along the way:
2787          */
2788         curr->lockdep_depth = i;
2789         curr->curr_chain_key = hlock->prev_chain_key;
2790
2791         for (i++; i < depth; i++) {
2792                 hlock = curr->held_locks + i;
2793                 if (!__lock_acquire(hlock->instance,
2794                         hlock_class(hlock)->subclass, hlock->trylock,
2795                                 hlock->read, hlock->check, hlock->hardirqs_off,
2796                                 hlock->nest_lock, hlock->acquire_ip))
2797                         return 0;
2798         }
2799
2800         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2801                 return 0;
2802         return 1;
2803 }
2804
2805 /*
2806  * Remove the lock from the list of currently held locks - this gets
2807  * called on mutex_unlock()/spin_unlock*() (or on a failed
2808  * mutex_lock_interruptible()). This is done for unlocks that nest
2809  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2810  */
2811 static int lock_release_nested(struct task_struct *curr,
2812                                struct lockdep_map *lock, unsigned long ip)
2813 {
2814         struct held_lock *hlock;
2815         unsigned int depth;
2816
2817         /*
2818          * Pop off the top of the lock stack:
2819          */
2820         depth = curr->lockdep_depth - 1;
2821         hlock = curr->held_locks + depth;
2822
2823         /*
2824          * Is the unlock non-nested:
2825          */
2826         if (hlock->instance != lock)
2827                 return lock_release_non_nested(curr, lock, ip);
2828         curr->lockdep_depth--;
2829
2830         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2831                 return 0;
2832
2833         curr->curr_chain_key = hlock->prev_chain_key;
2834
2835         lock_release_holdtime(hlock);
2836
2837 #ifdef CONFIG_DEBUG_LOCKDEP
2838         hlock->prev_chain_key = 0;
2839         hlock->class_idx = 0;
2840         hlock->acquire_ip = 0;
2841         hlock->irq_context = 0;
2842 #endif
2843         return 1;
2844 }
2845
2846 /*
2847  * Remove the lock from the list of currently held locks - this gets
2848  * called on mutex_unlock()/spin_unlock*() (or on a failed
2849  * mutex_lock_interruptible()). Nested (in-order) unlocks take the fast
2850  * path above; out-of-order unlocks fall back to the non-nested path.
2851  */
2852 static void
2853 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2854 {
2855         struct task_struct *curr = current;
2856
2857         if (!check_unlock(curr, lock, ip))
2858                 return;
2859
2860         if (nested) {
2861                 if (!lock_release_nested(curr, lock, ip))
2862                         return;
2863         } else {
2864                 if (!lock_release_non_nested(curr, lock, ip))
2865                         return;
2866         }
2867
2868         check_chain_key(curr);
2869 }
2870
2871 /*
2872  * Check whether we follow the irq-flags state precisely:
2873  */
2874 static void check_flags(unsigned long flags)
2875 {
2876 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2877     defined(CONFIG_TRACE_IRQFLAGS)
2878         if (!debug_locks)
2879                 return;
2880
2881         if (irqs_disabled_flags(flags)) {
2882                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2883                         printk("possible reason: unannotated irqs-off.\n");
2884                 }
2885         } else {
2886                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2887                         printk("possible reason: unannotated irqs-on.\n");
2888                 }
2889         }
2890
2891         /*
2892          * We don't accurately track softirq state in e.g.
2893          * hardirq contexts (such as on 4KSTACKS), so only
2894          * check if not in hardirq contexts:
2895          */
2896         if (!hardirq_count()) {
2897                 if (softirq_count())
2898                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2899                 else
2900                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2901         }
2902
2903         if (!debug_locks)
2904                 print_irqtrace_events(current);
2905 #endif
2906 }
2907
2908 void lock_set_class(struct lockdep_map *lock, const char *name,
2909                     struct lock_class_key *key, unsigned int subclass,
2910                     unsigned long ip)
2911 {
2912         unsigned long flags;
2913
2914         if (unlikely(current->lockdep_recursion))
2915                 return;
2916
2917         raw_local_irq_save(flags);
2918         current->lockdep_recursion = 1;
2919         check_flags(flags);
2920         if (__lock_set_class(lock, name, key, subclass, ip))
2921                 check_chain_key(current);
2922         current->lockdep_recursion = 0;
2923         raw_local_irq_restore(flags);
2924 }
2925 EXPORT_SYMBOL_GPL(lock_set_class);
2926
2927 DEFINE_TRACE(lock_acquire);
2928
2929 /*
2930  * We are not always called with irqs disabled - do that here,
2931  * and also avoid lockdep recursion:
2932  */
2933 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2934                           int trylock, int read, int check,
2935                           struct lockdep_map *nest_lock, unsigned long ip)
2936 {
2937         unsigned long flags;
2938
2939         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
2940
2941         if (unlikely(current->lockdep_recursion))
2942                 return;
2943
2944         raw_local_irq_save(flags);
2945         check_flags(flags);
2946
2947         current->lockdep_recursion = 1;
2948         __lock_acquire(lock, subclass, trylock, read, check,
2949                        irqs_disabled_flags(flags), nest_lock, ip);
2950         current->lockdep_recursion = 0;
2951         raw_local_irq_restore(flags);
2952 }
2953 EXPORT_SYMBOL_GPL(lock_acquire);
2954
2955 DEFINE_TRACE(lock_release);
2956
2957 void lock_release(struct lockdep_map *lock, int nested,
2958                           unsigned long ip)
2959 {
2960         unsigned long flags;
2961
2962         trace_lock_release(lock, nested, ip);
2963
2964         if (unlikely(current->lockdep_recursion))
2965                 return;
2966
2967         raw_local_irq_save(flags);
2968         check_flags(flags);
2969         current->lockdep_recursion = 1;
2970         __lock_release(lock, nested, ip);
2971         current->lockdep_recursion = 0;
2972         raw_local_irq_restore(flags);
2973 }
2974 EXPORT_SYMBOL_GPL(lock_release);
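/*
 * Hedged sketch of how the locking primitives are expected to call these
 * hooks; the real wrappers live in the lock implementations and the
 * parameter values below are illustrative only:
 *
 *	spin_lock(lock):
 *		lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		... actually take the lock ...
 *
 *	spin_unlock(lock):
 *		lock_release(&lock->dep_map, 1, _RET_IP_);
 *		... actually drop the lock ...
 *
 * i.e. subclass 0, non-trylock, non-read, full checking (check == 2),
 * no nest_lock, and a nested (in-order) release.
 */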
2975
2976 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2977 {
2978         current->lockdep_reclaim_gfp = gfp_mask;
2979 }
2980
2981 void lockdep_clear_current_reclaim_state(void)
2982 {
2983         current->lockdep_reclaim_gfp = 0;
2984 }
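/*
 * Hedged note: the reclaim paths (outside this file) are expected to
 * bracket memory reclaim with these helpers, roughly
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	... perform reclaim, possibly taking filesystem locks ...
 *	lockdep_clear_current_reclaim_state();
 *
 * While lockdep_reclaim_gfp is set, mark_irqflags() marks every lock
 * acquired as USED_IN_RECLAIM_FS, providing the other half of the
 * reclaim-vs-allocation dependency that lockdep_trace_alloc() tracks.
 */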
2985
2986 #ifdef CONFIG_LOCK_STAT
2987 static int
2988 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2989                            unsigned long ip)
2990 {
2991         if (!debug_locks_off())
2992                 return 0;
2993         if (debug_locks_silent)
2994                 return 0;
2995
2996         printk("\n=================================\n");
2997         printk(  "[ BUG: bad contention detected! ]\n");
2998         printk(  "---------------------------------\n");
2999         printk("%s/%d is trying to contend lock (",
3000                 curr->comm, task_pid_nr(curr));
3001         print_lockdep_cache(lock);
3002         printk(") at:\n");
3003         print_ip_sym(ip);
3004         printk("but there are no locks held!\n");
3005         printk("\nother info that might help us debug this:\n");
3006         lockdep_print_held_locks(curr);
3007
3008         printk("\nstack backtrace:\n");
3009         dump_stack();
3010
3011         return 0;
3012 }
3013
3014 static void
3015 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3016 {
3017         struct task_struct *curr = current;
3018         struct held_lock *hlock, *prev_hlock;
3019         struct lock_class_stats *stats;
3020         unsigned int depth;
3021         int i, contention_point, contending_point;
3022
3023         depth = curr->lockdep_depth;
3024         if (DEBUG_LOCKS_WARN_ON(!depth))
3025                 return;
3026
3027         prev_hlock = NULL;
3028         for (i = depth-1; i >= 0; i--) {
3029                 hlock = curr->held_locks + i;
3030                 /*
3031                  * We must not cross into another context:
3032                  */
3033                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3034                         break;
3035                 if (hlock->instance == lock)
3036                         goto found_it;
3037                 prev_hlock = hlock;
3038         }
3039         print_lock_contention_bug(curr, lock, ip);
3040         return;
3041
3042 found_it:
3043         hlock->waittime_stamp = sched_clock();
3044
3045         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3046         contending_point = lock_point(hlock_class(hlock)->contending_point,
3047                                       lock->ip);
3048
3049         stats = get_lock_stats(hlock_class(hlock));
3050         if (contention_point < LOCKSTAT_POINTS)
3051                 stats->contention_point[contention_point]++;
3052         if (contending_point < LOCKSTAT_POINTS)
3053                 stats->contending_point[contending_point]++;
3054         if (lock->cpu != smp_processor_id())
3055                 stats->bounces[bounce_contended + !!hlock->read]++;
3056         put_lock_stats(stats);
3057 }
3058
3059 static void
3060 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3061 {
3062         struct task_struct *curr = current;
3063         struct held_lock *hlock, *prev_hlock;
3064         struct lock_class_stats *stats;
3065         unsigned int depth;
3066         u64 now;
3067         s64 waittime = 0;
3068         int i, cpu;
3069
3070         depth = curr->lockdep_depth;
3071         if (DEBUG_LOCKS_WARN_ON(!depth))
3072                 return;
3073
3074         prev_hlock = NULL;
3075         for (i = depth-1; i >= 0; i--) {
3076                 hlock = curr->held_locks + i;
3077                 /*
3078                  * We must not cross into another context:
3079                  */
3080                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3081                         break;
3082                 if (hlock->instance == lock)
3083                         goto found_it;
3084                 prev_hlock = hlock;
3085         }
3086         print_lock_contention_bug(curr, lock, _RET_IP_);
3087         return;
3088
3089 found_it:
3090         cpu = smp_processor_id();
3091         if (hlock->waittime_stamp) {
3092                 now = sched_clock();
3093                 waittime = now - hlock->waittime_stamp;
3094                 hlock->holdtime_stamp = now;
3095         }
3096
3097         stats = get_lock_stats(hlock_class(hlock));
3098         if (waittime) {
3099                 if (hlock->read)
3100                         lock_time_inc(&stats->read_waittime, waittime);
3101                 else
3102                         lock_time_inc(&stats->write_waittime, waittime);
3103         }
3104         if (lock->cpu != cpu)
3105                 stats->bounces[bounce_acquired + !!hlock->read]++;
3106         put_lock_stats(stats);
3107
3108         lock->cpu = cpu;
3109         lock->ip = ip;
3110 }
3111
3112 DEFINE_TRACE(lock_contended);
3113
3114 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3115 {
3116         unsigned long flags;
3117
3118         trace_lock_contended(lock, ip);
3119
3120         if (unlikely(!lock_stat))
3121                 return;
3122
3123         if (unlikely(current->lockdep_recursion))
3124                 return;
3125
3126         raw_local_irq_save(flags);
3127         check_flags(flags);
3128         current->lockdep_recursion = 1;
3129         __lock_contended(lock, ip);
3130         current->lockdep_recursion = 0;
3131         raw_local_irq_restore(flags);
3132 }
3133 EXPORT_SYMBOL_GPL(lock_contended);
3134
3135 DEFINE_TRACE(lock_acquired);
3136
3137 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3138 {
3139         unsigned long flags;
3140
3141         trace_lock_acquired(lock, ip);
3142
3143         if (unlikely(!lock_stat))
3144                 return;
3145
3146         if (unlikely(current->lockdep_recursion))
3147                 return;
3148
3149         raw_local_irq_save(flags);
3150         check_flags(flags);
3151         current->lockdep_recursion = 1;
3152         __lock_acquired(lock, ip);
3153         current->lockdep_recursion = 0;
3154         raw_local_irq_restore(flags);
3155 }
3156 EXPORT_SYMBOL_GPL(lock_acquired);
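/*
 * Hedged sketch of the expected annotation pattern in the lock
 * implementations (kept deliberately approximate here):
 *
 *	if (!trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		slowpath_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * i.e. contention is only recorded when the fast path fails, and the
 * wait time is measured from lock_contended() to lock_acquired().
 */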
3157 #endif
3158
3159 /*
3160  * Used by the testsuite, sanitize the validator state
3161  * after a simulated failure:
3162  */
3163
3164 void lockdep_reset(void)
3165 {
3166         unsigned long flags;
3167         int i;
3168
3169         raw_local_irq_save(flags);
3170         current->curr_chain_key = 0;
3171         current->lockdep_depth = 0;
3172         current->lockdep_recursion = 0;
3173         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3174         nr_hardirq_chains = 0;
3175         nr_softirq_chains = 0;
3176         nr_process_chains = 0;
3177         debug_locks = 1;
3178         for (i = 0; i < CHAINHASH_SIZE; i++)
3179                 INIT_LIST_HEAD(chainhash_table + i);
3180         raw_local_irq_restore(flags);
3181 }
3182
3183 static void zap_class(struct lock_class *class)
3184 {
3185         int i;
3186
3187         /*
3188          * Remove all dependencies this lock is
3189          * involved in:
3190          */
3191         for (i = 0; i < nr_list_entries; i++) {
3192                 if (list_entries[i].class == class)
3193                         list_del_rcu(&list_entries[i].entry);
3194         }
3195         /*
3196          * Unhash the class and remove it from the all_lock_classes list:
3197          */
3198         list_del_rcu(&class->hash_entry);
3199         list_del_rcu(&class->lock_entry);
3200
3201         class->key = NULL;
3202 }
3203
3204 static inline int within(const void *addr, void *start, unsigned long size)
3205 {
3206         return addr >= start && addr < start + size;
3207 }
3208
3209 void lockdep_free_key_range(void *start, unsigned long size)
3210 {
3211         struct lock_class *class, *next;
3212         struct list_head *head;
3213         unsigned long flags;
3214         int i;
3215         int locked;
3216
3217         raw_local_irq_save(flags);
3218         locked = graph_lock();
3219
3220         /*
3221          * Unhash all classes that were created by this module:
3222          */
3223         for (i = 0; i < CLASSHASH_SIZE; i++) {
3224                 head = classhash_table + i;
3225                 if (list_empty(head))
3226                         continue;
3227                 list_for_each_entry_safe(class, next, head, hash_entry) {
3228                         if (within(class->key, start, size))
3229                                 zap_class(class);
3230                         else if (within(class->name, start, size))
3231                                 zap_class(class);
3232                 }
3233         }
3234
3235         if (locked)
3236                 graph_unlock();
3237         raw_local_irq_restore(flags);
3238 }
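/*
 * Illustrative sketch (not part of this file): module unload is the
 * main caller of lockdep_free_key_range() - before a module's init
 * and core regions are freed, every class whose key or name points
 * into them must be zapped.  The helper below is hypothetical; the
 * module_init/module_core/init_size/core_size fields are assumed to
 * match struct module of this era:
 */
static void my_free_module_lock_classes(struct module *mod)
{
	lockdep_free_key_range(mod->module_init, mod->init_size);
	lockdep_free_key_range(mod->module_core, mod->core_size);
}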
3239
3240 void lockdep_reset_lock(struct lockdep_map *lock)
3241 {
3242         struct lock_class *class, *next;
3243         struct list_head *head;
3244         unsigned long flags;
3245         int i, j;
3246         int locked;
3247
3248         raw_local_irq_save(flags);
3249
3250         /*
3251          * Remove all classes this lock might have:
3252          */
3253         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3254                 /*
3255                  * If the class exists we look it up and zap it:
3256                  */
3257                 class = look_up_lock_class(lock, j);
3258                 if (class)
3259                         zap_class(class);
3260         }
3261         /*
3262          * Debug check: in the end all mapped classes should
3263          * be gone.
3264          */
3265         locked = graph_lock();
3266         for (i = 0; i < CLASSHASH_SIZE; i++) {
3267                 head = classhash_table + i;
3268                 if (list_empty(head))
3269                         continue;
3270                 list_for_each_entry_safe(class, next, head, hash_entry) {
3271                         if (unlikely(class == lock->class_cache)) {
3272                                 if (debug_locks_off_graph_unlock())
3273                                         WARN_ON(1);
3274                                 goto out_restore;
3275                         }
3276                 }
3277         }
3278         if (locked)
3279                 graph_unlock();
3280
3281 out_restore:
3282         raw_local_irq_restore(flags);
3283 }
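/*
 * Illustrative sketch (not part of this file): lockdep_reset_lock()
 * is used when a lock object is torn down or re-keyed and its classes
 * should no longer be tracked, e.g. between cases of the locking
 * self-tests.  Assumes CONFIG_DEBUG_LOCK_ALLOC, so that spinlock_t
 * carries a dep_map; the helper name is a hypothetical placeholder:
 */
static void my_retire_test_lock(spinlock_t *lock)
{
	lockdep_reset_lock(&lock->dep_map);
}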
3284
3285 void lockdep_init(void)
3286 {
3287         int i;
3288
3289         /*
3290          * Some architectures have their own start_kernel()
3291          * code which calls lockdep_init(), while we also
3292          * call lockdep_init() from start_kernel() itself,
3293          * and we want to initialize the hashes only once:
3294          */
3295         if (lockdep_initialized)
3296                 return;
3297
3298         for (i = 0; i < CLASSHASH_SIZE; i++)
3299                 INIT_LIST_HEAD(classhash_table + i);
3300
3301         for (i = 0; i < CHAINHASH_SIZE; i++)
3302                 INIT_LIST_HEAD(chainhash_table + i);
3303
3304         lockdep_initialized = 1;
3305 }
3306
3307 void __init lockdep_info(void)
3308 {
3309         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3310
3311         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3312         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3313         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3314         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3315         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3316         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3317         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3318
3319         printk(" memory used by lock dependency info: %lu kB\n",
3320                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3321                 sizeof(struct list_head) * CLASSHASH_SIZE +
3322                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3323                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3324                 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3325
3326         printk(" per task-struct memory footprint: %lu bytes\n",
3327                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3328
3329 #ifdef CONFIG_DEBUG_LOCKDEP
3330         if (lockdep_init_error) {
3331                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3332                 printk("Call stack leading to lockdep invocation was:\n");
3333                 print_stack_trace(&lockdep_init_trace, 0);
3334         }
3335 #endif
3336 }
3337
3338 static void
3339 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3340                      const void *mem_to, struct held_lock *hlock)
3341 {
3342         if (!debug_locks_off())
3343                 return;
3344         if (debug_locks_silent)
3345                 return;
3346
3347         printk("\n=========================\n");
3348         printk(  "[ BUG: held lock freed! ]\n");
3349         printk(  "-------------------------\n");
3350         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3351                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3352         print_lock(hlock);
3353         lockdep_print_held_locks(curr);
3354
3355         printk("\nstack backtrace:\n");
3356         dump_stack();
3357 }
3358
3359 static inline int not_in_range(const void *mem_from, unsigned long mem_len,
3360                                 const void *lock_from, unsigned long lock_len)
3361 {
3362         return lock_from + lock_len <= mem_from ||
3363                 mem_from + mem_len <= lock_from;
3364 }
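/*
 * Worked example: with mem_from = 0x1000 and mem_len = 0x100 the freed
 * range is [0x1000, 0x1100); a lock at lock_from = 0x10f8 with
 * lock_len = 0x10 overlaps it, so not_in_range() returns 0 and the
 * free is reported as a bug by the code below.
 */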
3365
3366 /*
3367  * Called when kernel memory is freed (or unmapped), or if a lock
3368  * is destroyed or reinitialized - this code checks whether there is
3369  * any held lock in the memory range [mem_from, mem_from + mem_len):
3370  */
3371 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3372 {
3373         struct task_struct *curr = current;
3374         struct held_lock *hlock;
3375         unsigned long flags;
3376         int i;
3377
3378         if (unlikely(!debug_locks))
3379                 return;
3380
3381         local_irq_save(flags);
3382         for (i = 0; i < curr->lockdep_depth; i++) {
3383                 hlock = curr->held_locks + i;
3384
3385                 if (not_in_range(mem_from, mem_len, hlock->instance,
3386                                         sizeof(*hlock->instance)))
3387                         continue;
3388
3389                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3390                 break;
3391         }
3392         local_irq_restore(flags);
3393 }
3394 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
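/*
 * Illustrative sketch (not part of this file): allocator free paths
 * call debug_check_no_locks_freed() on the object about to be
 * released, so that freeing memory which still contains a held lock
 * is reported before the memory is reused.  The hook below is a
 * hypothetical placeholder:
 */
static void my_free_hook(void *obj, unsigned long size)
{
	debug_check_no_locks_freed(obj, size);
	/* ... hand the object back to the allocator ... */
}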
3395
3396 static void print_held_locks_bug(struct task_struct *curr)
3397 {
3398         if (!debug_locks_off())
3399                 return;
3400         if (debug_locks_silent)
3401                 return;
3402
3403         printk("\n=====================================\n");
3404         printk(  "[ BUG: lock held at task exit time! ]\n");
3405         printk(  "-------------------------------------\n");
3406         printk("%s/%d is exiting with locks still held!\n",
3407                 curr->comm, task_pid_nr(curr));
3408         lockdep_print_held_locks(curr);
3409
3410         printk("\nstack backtrace:\n");
3411         dump_stack();
3412 }
3413
3414 void debug_check_no_locks_held(struct task_struct *task)
3415 {
3416         if (unlikely(task->lockdep_depth > 0))
3417                 print_held_locks_bug(task);
3418 }
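/*
 * Illustrative sketch (not part of this file): the task exit path is
 * the natural caller of debug_check_no_locks_held() - a task exiting
 * with locks held can never release them.  Roughly, in the spirit of
 * do_exit():
 */
static void my_exit_debug_checks(struct task_struct *tsk)
{
	debug_check_no_locks_held(tsk);
}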
3419
3420 void debug_show_all_locks(void)
3421 {
3422         struct task_struct *g, *p;
3423         int count = 10;
3424         int unlock = 1;
3425
3426         if (unlikely(!debug_locks)) {
3427                 printk("INFO: lockdep is turned off.\n");
3428                 return;
3429         }
3430         printk("\nShowing all locks held in the system:\n");
3431
3432         /*
3433          * Here we try to get the tasklist_lock as hard as possible,
3434          * if not successful after 2 seconds we ignore it (but keep
3435          * trying). This is to enable a debug printout even if a
3436          * tasklist_lock-holding task deadlocks or crashes.
3437          */
3438 retry:
3439         if (!read_trylock(&tasklist_lock)) {
3440                 if (count == 10)
3441                         printk("hm, tasklist_lock locked, retrying... ");
3442                 if (count) {
3443                         count--;
3444                         printk(" #%d", 10-count);
3445                         mdelay(200);
3446                         goto retry;
3447                 }
3448                 printk(" ignoring it.\n");
3449                 unlock = 0;
3450         } else {
3451                 if (count != 10)
3452                         printk(KERN_CONT " locked it.\n");
3453         }
3454
3455         do_each_thread(g, p) {
3456                 /*
3457                  * It's not reliable to print a task's held locks
3458                  * if it's not sleeping (or if it's not the current
3459                  * task):
3460                  */
3461                 if (p->state == TASK_RUNNING && p != current)
3462                         continue;
3463                 if (p->lockdep_depth)
3464                         lockdep_print_held_locks(p);
3465                 if (!unlock)
3466                         if (read_trylock(&tasklist_lock))
3467                                 unlock = 1;
3468         } while_each_thread(g, p);
3469
3470         printk("\n");
3471         printk("=============================================\n\n");
3472
3473         if (unlock)
3474                 read_unlock(&tasklist_lock);
3475 }
3476 EXPORT_SYMBOL_GPL(debug_show_all_locks);
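/*
 * Illustrative sketch (not part of this file): debug_show_all_locks()
 * is meant for emergency diagnostics, e.g. a SysRq handler or a hung
 * task watchdog wanting a system-wide dump of held locks.  The report
 * helper below is a hypothetical placeholder:
 */
static void my_hung_task_report(struct task_struct *t)
{
	printk(KERN_ERR "INFO: task %s:%d appears to be stuck\n",
	       t->comm, task_pid_nr(t));
	debug_show_held_locks(t);	/* locks held by the suspect task */
	debug_show_all_locks();		/* full system-wide picture */
}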
3477
3478 /*
3479  * Careful: only use this function if you are sure that
3480  * the task cannot run in parallel!
3481  */
3482 void __debug_show_held_locks(struct task_struct *task)
3483 {
3484         if (unlikely(!debug_locks)) {
3485                 printk("INFO: lockdep is turned off.\n");
3486                 return;
3487         }
3488         lockdep_print_held_locks(task);
3489 }
3490 EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3491
3492 void debug_show_held_locks(struct task_struct *task)
3493 {
3494         __debug_show_held_locks(task);
3495 }
3496 EXPORT_SYMBOL_GPL(debug_show_held_locks);
3497
3498 void lockdep_sys_exit(void)
3499 {
3500         struct task_struct *curr = current;
3501
3502         if (unlikely(curr->lockdep_depth)) {
3503                 if (!debug_locks_off())
3504                         return;
3505                 printk("\n================================================\n");
3506                 printk(  "[ BUG: lock held when returning to user space! ]\n");
3507                 printk(  "------------------------------------------------\n");
3508                 printk("%s/%d is leaving the kernel with locks still held!\n",
3509                                 curr->comm, task_pid_nr(curr));
3510                 lockdep_print_held_locks(curr);
3511         }
3512 }
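/*
 * Illustrative note (not part of this file): lockdep_sys_exit() is
 * invoked from the architecture's system-call return path (x86 wires
 * it up via an assembly thunk), so a syscall that returns to user
 * space with a lock still held is reported immediately.
 */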