1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/kmemleak.c
4  *
5  * Copyright (C) 2008 ARM Limited
6  * Written by Catalin Marinas <catalin.marinas@arm.com>
7  *
8  * For more information on the algorithm and kmemleak usage, please see
9  * Documentation/dev-tools/kmemleak.rst.
10  *
11  * Notes on locking
12  * ----------------
13  *
14  * The following locks and mutexes are used by kmemleak:
15  *
16  * - kmemleak_lock (rwlock): protects the object_list modifications and
17  *   accesses to the object_tree_root. The object_list is the main list
18  *   holding the metadata (struct kmemleak_object) for the allocated memory
19  *   blocks. The object_tree_root is a red black tree used to look-up
20  *   metadata based on a pointer to the corresponding memory block.  The
21  *   kmemleak_object structures are added to the object_list and
22  *   object_tree_root in the create_object() function called from the
23  *   kmemleak_alloc() callback and removed in delete_object() called from the
24  *   kmemleak_free() callback
25  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
26  *   the metadata (e.g. count) are protected by this lock. Note that some
27  *   members of this structure may be protected by other means (atomic or
28  *   kmemleak_lock). This lock is also held when scanning the corresponding
29  *   memory block to avoid the kernel freeing it via the kmemleak_free()
30  *   callback. This is less heavyweight than holding a global lock like
31  *   kmemleak_lock during scanning
32  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
33  *   unreferenced objects at a time. The gray_list contains the objects which
34  *   are already referenced or marked as false positives and need to be
35  *   scanned. This list is only modified during a scanning episode when the
36  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
37  *   Note that the kmemleak_object.use_count is incremented when an object is
38  *   added to the gray_list and therefore cannot be freed. This mutex also
39  *   prevents concurrent users of the "kmemleak" debugfs file as well as
40  *   concurrent modifications to the memory scanning parameters, including
41  *   the scan_thread pointer
42  *
43  * Locks and mutexes are acquired/nested in the following order:
44  *
45  *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
46  *
47  * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
48  * regions.
49  *
50  * The kmemleak_object structures have a use_count incremented or decremented
51  * using the get_object()/put_object() functions. When the use_count becomes
52  * 0, this count can no longer be incremented and put_object() schedules the
53  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
54  * function must be protected by rcu_read_lock() to avoid accessing a freed
55  * structure.
56  */
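
/*
 * Illustrative sketch (not code from this file): the deepest nesting of
 * the locks above occurs while scanning, roughly as follows:
 *
 *      mutex_lock(&scan_mutex);
 *      spin_lock_irqsave(&object->lock, flags);        <- scan_object()
 *      read_lock_irqsave(&kmemleak_lock, flags2);      <- scan_block()
 *      spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *      ...unlock in reverse order...
 *      mutex_unlock(&scan_mutex);
 */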
57
58 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59
60 #include <linux/init.h>
61 #include <linux/kernel.h>
62 #include <linux/list.h>
63 #include <linux/sched/signal.h>
64 #include <linux/sched/task.h>
65 #include <linux/sched/task_stack.h>
66 #include <linux/jiffies.h>
67 #include <linux/delay.h>
68 #include <linux/export.h>
69 #include <linux/kthread.h>
70 #include <linux/rbtree.h>
71 #include <linux/fs.h>
72 #include <linux/debugfs.h>
73 #include <linux/seq_file.h>
74 #include <linux/cpumask.h>
75 #include <linux/spinlock.h>
76 #include <linux/module.h>
77 #include <linux/mutex.h>
78 #include <linux/rcupdate.h>
79 #include <linux/stacktrace.h>
80 #include <linux/cache.h>
81 #include <linux/percpu.h>
82 #include <linux/memblock.h>
83 #include <linux/pfn.h>
84 #include <linux/mmzone.h>
85 #include <linux/slab.h>
86 #include <linux/thread_info.h>
87 #include <linux/err.h>
88 #include <linux/uaccess.h>
89 #include <linux/string.h>
90 #include <linux/nodemask.h>
91 #include <linux/mm.h>
92 #include <linux/workqueue.h>
93 #include <linux/crc32.h>
94
95 #include <asm/sections.h>
96 #include <asm/processor.h>
97 #include <linux/atomic.h>
98
99 #include <linux/kasan.h>
100 #include <linux/kmemleak.h>
101 #include <linux/memory_hotplug.h>
102
103 /*
104  * Kmemleak configuration and common defines.
105  */
106 #define MAX_TRACE               16      /* stack trace length */
107 #define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
108 #define SECS_FIRST_SCAN         60      /* delay before the first scan */
109 #define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
110 #define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */
111
112 #define BYTES_PER_POINTER       sizeof(void *)
113
114 /* GFP bitmask for kmemleak internal allocations */
115 #define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
116                                  __GFP_NORETRY | __GFP_NOMEMALLOC | \
117                                  __GFP_NOWARN)
118
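/*
 * For example, a kmemleak_alloc(..., GFP_KERNEL) caller has its metadata
 * allocated with
 *
 *      gfp_kmemleak_mask(GFP_KERNEL) == GFP_KERNEL | __GFP_NORETRY |
 *                                       __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. only the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags are kept,
 * and the internal allocation never retries, never dips into emergency
 * reserves and fails silently.
 */
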
119 /* scanning area inside a memory block */
120 struct kmemleak_scan_area {
121         struct hlist_node node;
122         unsigned long start;
123         size_t size;
124 };
125
126 #define KMEMLEAK_GREY   0
127 #define KMEMLEAK_BLACK  -1
128
129 /*
130  * Structure holding the metadata for each allocated memory block.
131  * Modifications to such objects should be made while holding the
132  * object->lock. Insertions or deletions from object_list, gray_list or
133  * rb_node are already protected by the corresponding locks or mutex (see
134  * the notes on locking above). These objects are reference-counted
135  * (use_count) and freed using the RCU mechanism.
136  */
137 struct kmemleak_object {
138         spinlock_t lock;
139         unsigned int flags;             /* object status flags */
140         struct list_head object_list;
141         struct list_head gray_list;
142         struct rb_node rb_node;
143         struct rcu_head rcu;            /* object_list lockless traversal */
144         /* object usage count; object freed when use_count == 0 */
145         atomic_t use_count;
146         unsigned long pointer;
147         size_t size;
148         /* pass surplus references to this pointer */
149         unsigned long excess_ref;
150         /* minimum number of pointers found before the object is considered a leak */
151         int min_count;
152         /* the total number of pointers found pointing to this object */
153         int count;
154         /* checksum for detecting modified objects */
155         u32 checksum;
156         /* memory ranges to be scanned inside an object (empty for all) */
157         struct hlist_head area_list;
158         unsigned long trace[MAX_TRACE];
159         unsigned int trace_len;
160         unsigned long jiffies;          /* creation timestamp */
161         pid_t pid;                      /* pid of the current task */
162         char comm[TASK_COMM_LEN];       /* executable name */
163 };
164
165 /* flag representing the memory block allocation status */
166 #define OBJECT_ALLOCATED        (1 << 0)
167 /* flag set after the first reporting of an unreferenced object */
168 #define OBJECT_REPORTED         (1 << 1)
169 /* flag set to not scan the object */
170 #define OBJECT_NO_SCAN          (1 << 2)
171
172 #define HEX_PREFIX              "    "
173 /* number of bytes to print per line; must be 16 or 32 */
174 #define HEX_ROW_SIZE            16
175 /* number of bytes to print at a time (1, 2, 4, 8) */
176 #define HEX_GROUP_SIZE          1
177 /* include ASCII after the hex output */
178 #define HEX_ASCII               1
179 /* max number of lines to be printed */
180 #define HEX_MAX_LINES           2
181
182 /* the list of all allocated objects */
183 static LIST_HEAD(object_list);
184 /* the list of gray-colored objects (see color_gray comment below) */
185 static LIST_HEAD(gray_list);
186 /* search tree for object boundaries */
187 static struct rb_root object_tree_root = RB_ROOT;
188 /* rw_lock protecting the access to object_list and object_tree_root */
189 static DEFINE_RWLOCK(kmemleak_lock);
190
191 /* allocation caches for kmemleak internal data */
192 static struct kmem_cache *object_cache;
193 static struct kmem_cache *scan_area_cache;
194
195 /* set if tracing memory operations is enabled */
196 static int kmemleak_enabled;
197 /* same as above but only for the kmemleak_free() callback */
198 static int kmemleak_free_enabled;
199 /* set in the late_initcall if there were no errors */
200 static int kmemleak_initialized;
201 /* enables or disables early logging of the memory operations */
202 static int kmemleak_early_log = 1;
203 /* set if a kmemleak warning was issued */
204 static int kmemleak_warning;
205 /* set if a fatal kmemleak error has occurred */
206 static int kmemleak_error;
207
208 /* minimum and maximum address that may be valid pointers */
209 static unsigned long min_addr = ULONG_MAX;
210 static unsigned long max_addr;
211
212 static struct task_struct *scan_thread;
213 /* used to avoid reporting of recently allocated objects */
214 static unsigned long jiffies_min_age;
215 static unsigned long jiffies_last_scan;
216 /* delay between automatic memory scannings */
217 static signed long jiffies_scan_wait;
218 /* enables or disables the task stacks scanning */
219 static int kmemleak_stack_scan = 1;
220 /* protects the memory scanning, parameters and debug/kmemleak file access */
221 static DEFINE_MUTEX(scan_mutex);
222 /* setting kmemleak=on will set this var, skipping the disable */
223 static int kmemleak_skip_disable;
224 /* If there are leaks that can be reported */
225 static bool kmemleak_found_leaks;
226
227 static bool kmemleak_verbose;
228 module_param_named(verbose, kmemleak_verbose, bool, 0600);
229
230 /*
231  * Early object allocation/freeing logging. Kmemleak is initialized after the
232  * kernel allocator. However, both the kernel allocator and kmemleak may
233  * allocate memory blocks which need to be tracked. Kmemleak defines a static
234  * buffer (CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE entries) to hold the
235  * allocation/freeing information before it is fully initialized.
236  */
237
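/*
 * For example, an allocation that happens before kmemleak is initialized
 * is only buffered and replayed later:
 *
 *      kmemleak_alloc(ptr, size, 1, gfp)
 *        -> log_early(KMEMLEAK_ALLOC, ptr, size, 1)    <- buffered below
 *      ...
 *      kmemleak_init()
 *        -> early_alloc()      <- replayed via create_object()
 */
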
238 /* kmemleak operation type for early logging */
239 enum {
240         KMEMLEAK_ALLOC,
241         KMEMLEAK_ALLOC_PERCPU,
242         KMEMLEAK_FREE,
243         KMEMLEAK_FREE_PART,
244         KMEMLEAK_FREE_PERCPU,
245         KMEMLEAK_NOT_LEAK,
246         KMEMLEAK_IGNORE,
247         KMEMLEAK_SCAN_AREA,
248         KMEMLEAK_NO_SCAN,
249         KMEMLEAK_SET_EXCESS_REF
250 };
251
252 /*
253  * Structure holding the information passed to kmemleak callbacks during the
254  * early logging.
255  */
256 struct early_log {
257         int op_type;                    /* kmemleak operation type */
258         int min_count;                  /* minimum reference count */
259         const void *ptr;                /* allocated/freed memory block */
260         union {
261                 size_t size;            /* memory block size */
262                 unsigned long excess_ref; /* surplus reference passing */
263         };
264         unsigned long trace[MAX_TRACE]; /* stack trace */
265         unsigned int trace_len;         /* stack trace length */
266 };
267
268 /* early logging buffer and current position */
269 static struct early_log
270         early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
271 static int crt_early_log __initdata;
272
273 static void kmemleak_disable(void);
274
275 /*
276  * Print a warning and dump the stack trace.
277  */
278 #define kmemleak_warn(x...)     do {            \
279         pr_warn(x);                             \
280         dump_stack();                           \
281         kmemleak_warning = 1;                   \
282 } while (0)
283
284 /*
285  * Macro invoked when a serious kmemleak condition has occurred and cannot be
286  * recovered from. Kmemleak will be disabled and further allocation/freeing
287  * tracing is no longer available.
288  */
289 #define kmemleak_stop(x...)     do {    \
290         kmemleak_warn(x);               \
291         kmemleak_disable();             \
292 } while (0)
293
294 #define warn_or_seq_printf(seq, fmt, ...)       do {    \
295         if (seq)                                        \
296                 seq_printf(seq, fmt, ##__VA_ARGS__);    \
297         else                                            \
298                 pr_warn(fmt, ##__VA_ARGS__);            \
299 } while (0)
300
301 static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
302                                  int rowsize, int groupsize, const void *buf,
303                                  size_t len, bool ascii)
304 {
305         if (seq)
306                 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
307                              buf, len, ascii);
308         else
309                 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
310                                rowsize, groupsize, buf, len, ascii);
311 }
312
313 /*
314  * Print the object's hex dump to the seq file. The number of lines to be
315  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
316  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
317  * with the object->lock held.
318  */
319 static void hex_dump_object(struct seq_file *seq,
320                             struct kmemleak_object *object)
321 {
322         const u8 *ptr = (const u8 *)object->pointer;
323         size_t len;
324
325         /* limit the number of lines to HEX_MAX_LINES */
326         len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
327
328         warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
329         kasan_disable_current();
330         warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
331                              HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
332         kasan_enable_current();
333 }
334
335 /*
336  * Object colors, encoded with count and min_count:
337  * - white - orphan object, not enough references to it (count < min_count)
338  * - gray  - not orphan, not marked as false positive (min_count == 0) or
339  *              sufficient references to it (count >= min_count)
340  * - black - ignore, it doesn't contain references (e.g. text section)
341  *              (min_count == -1). No function defined for this color.
342  * Newly created objects don't have any color assigned (object->count == -1)
343  * before the next memory scan when they become white.
344  */
345 static bool color_white(const struct kmemleak_object *object)
346 {
347         return object->count != KMEMLEAK_BLACK &&
348                 object->count < object->min_count;
349 }
350
351 static bool color_gray(const struct kmemleak_object *object)
352 {
353         return object->min_count != KMEMLEAK_BLACK &&
354                 object->count >= object->min_count;
355 }
356
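/*
 * Worked example (illustrative): at the start of a scan every object's
 * count is reset to 0 (white). Each pointer found to the object during the
 * scan increments count (see update_refs()); once count >= min_count the
 * object turns gray, is added to the gray_list and is itself scanned. An
 * object registered with min_count == 0 is therefore gray immediately
 * after the reset, while min_count == KMEMLEAK_BLACK keeps it black:
 * neither scanned nor reported.
 */
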
357 /*
358  * Objects are considered unreferenced only if their color is white, they have
359  * not been deleted and have a minimum age to avoid false positives caused by
360  * pointers temporarily stored in CPU registers.
361  */
362 static bool unreferenced_object(struct kmemleak_object *object)
363 {
364         return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
365                 time_before_eq(object->jiffies + jiffies_min_age,
366                                jiffies_last_scan);
367 }
368
369 /*
370  * Print the unreferenced object's information to the seq file. The
371  * print_unreferenced function must be called with the object->lock held.
372  */
373 static void print_unreferenced(struct seq_file *seq,
374                                struct kmemleak_object *object)
375 {
376         int i;
377         unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
378
379         warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
380                    object->pointer, object->size);
381         warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
382                    object->comm, object->pid, object->jiffies,
383                    msecs_age / 1000, msecs_age % 1000);
384         hex_dump_object(seq, object);
385         warn_or_seq_printf(seq, "  backtrace:\n");
386
387         for (i = 0; i < object->trace_len; i++) {
388                 void *ptr = (void *)object->trace[i];
389                 warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
390         }
391 }
392
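/*
 * For illustration, the report produced by the function above looks
 * roughly like this (all values are made up):
 *
 *      unreferenced object 0xffff88806a8b1000 (size 64):
 *        comm "modprobe", pid 123, jiffies 4294953600 (age 68.400s)
 *        hex dump (first 32 bytes):
 *          6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
 *          ...
 *        backtrace:
 *          [<000000001234abcd>] kmem_cache_alloc+0x128/0x260
 */
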
393 /*
394  * Print the kmemleak_object information. This function is used mainly for
395  * debugging special cases of kmemleak operations. It must be called with
396  * the object->lock held.
397  */
398 static void dump_object_info(struct kmemleak_object *object)
399 {
400         pr_notice("Object 0x%08lx (size %zu):\n",
401                   object->pointer, object->size);
402         pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
403                   object->comm, object->pid, object->jiffies);
404         pr_notice("  min_count = %d\n", object->min_count);
405         pr_notice("  count = %d\n", object->count);
406         pr_notice("  flags = 0x%x\n", object->flags);
407         pr_notice("  checksum = %u\n", object->checksum);
408         pr_notice("  backtrace:\n");
409         stack_trace_print(object->trace, object->trace_len, 4);
410 }
411
412 /*
413  * Look up a memory block's metadata (kmemleak_object) in the object search
414  * tree based on a pointer value. If alias is 0, only values pointing to the
415  * beginning of the memory block are allowed. The kmemleak_lock must be held
416  * when calling this function.
417  */
418 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
419 {
420         struct rb_node *rb = object_tree_root.rb_node;
421
422         while (rb) {
423                 struct kmemleak_object *object =
424                         rb_entry(rb, struct kmemleak_object, rb_node);
425                 if (ptr < object->pointer)
426                         rb = object->rb_node.rb_left;
427                 else if (object->pointer + object->size <= ptr)
428                         rb = object->rb_node.rb_right;
429                 else if (object->pointer == ptr || alias)
430                         return object;
431                 else {
432                         kmemleak_warn("Found object by alias at 0x%08lx\n",
433                                       ptr);
434                         dump_object_info(object);
435                         break;
436                 }
437         }
438         return NULL;
439 }
440
441 /*
442  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
443  * that once an object's use_count has reached 0, the RCU freeing has already
444  * been registered and the object should no longer be used. This function must be
445  * called under the protection of rcu_read_lock().
446  */
447 static int get_object(struct kmemleak_object *object)
448 {
449         return atomic_inc_not_zero(&object->use_count);
450 }
451
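/*
 * Typical lookup pattern (see find_and_get_object() below):
 *
 *      rcu_read_lock();
 *      read_lock_irqsave(&kmemleak_lock, flags);
 *      object = lookup_object(ptr, alias);
 *      read_unlock_irqrestore(&kmemleak_lock, flags);
 *      if (object && !get_object(object))
 *              object = NULL;  <- freeing already scheduled
 *      rcu_read_unlock();
 */
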
452 /*
453  * RCU callback to free a kmemleak_object.
454  */
455 static void free_object_rcu(struct rcu_head *rcu)
456 {
457         struct hlist_node *tmp;
458         struct kmemleak_scan_area *area;
459         struct kmemleak_object *object =
460                 container_of(rcu, struct kmemleak_object, rcu);
461
462         /*
463          * Once use_count is 0 (guaranteed by put_object), there is no other
464          * code accessing this object, hence no need for locking.
465          */
466         hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
467                 hlist_del(&area->node);
468                 kmem_cache_free(scan_area_cache, area);
469         }
470         kmem_cache_free(object_cache, object);
471 }
472
473 /*
474  * Decrement the object use_count. Once the count is 0, free the object using
475  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
476  * delete_object() path, the delayed RCU freeing ensures that there is no
477  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
478  * is also possible.
479  */
480 static void put_object(struct kmemleak_object *object)
481 {
482         if (!atomic_dec_and_test(&object->use_count))
483                 return;
484
485         /* should only get here after delete_object was called */
486         WARN_ON(object->flags & OBJECT_ALLOCATED);
487
488         call_rcu(&object->rcu, free_object_rcu);
489 }
490
491 /*
492  * Look up an object in the object search tree and increase its use_count.
493  */
494 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
495 {
496         unsigned long flags;
497         struct kmemleak_object *object;
498
499         rcu_read_lock();
500         read_lock_irqsave(&kmemleak_lock, flags);
501         object = lookup_object(ptr, alias);
502         read_unlock_irqrestore(&kmemleak_lock, flags);
503
504         /* check whether the object is still available */
505         if (object && !get_object(object))
506                 object = NULL;
507         rcu_read_unlock();
508
509         return object;
510 }
511
512 /*
513  * Look up an object in the object search tree and remove it from both
514  * object_tree_root and object_list. The returned object's use_count should be
515  * at least 1, as initially set by create_object().
516  */
517 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
518 {
519         unsigned long flags;
520         struct kmemleak_object *object;
521
522         write_lock_irqsave(&kmemleak_lock, flags);
523         object = lookup_object(ptr, alias);
524         if (object) {
525                 rb_erase(&object->rb_node, &object_tree_root);
526                 list_del_rcu(&object->object_list);
527         }
528         write_unlock_irqrestore(&kmemleak_lock, flags);
529
530         return object;
531 }
532
533 /*
534  * Save stack trace to the given array of MAX_TRACE size.
535  */
536 static int __save_stack_trace(unsigned long *trace)
537 {
538         return stack_trace_save(trace, MAX_TRACE, 2);
539 }
540
541 /*
542  * Create the metadata (struct kmemleak_object) corresponding to an allocated
543  * memory block and add it to the object_list and object_tree_root.
544  */
545 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
546                                              int min_count, gfp_t gfp)
547 {
548         unsigned long flags;
549         struct kmemleak_object *object, *parent;
550         struct rb_node **link, *rb_parent;
551         unsigned long untagged_ptr;
552
553         object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
554         if (!object) {
555                 pr_warn("Cannot allocate a kmemleak_object structure\n");
556                 kmemleak_disable();
557                 return NULL;
558         }
559
560         INIT_LIST_HEAD(&object->object_list);
561         INIT_LIST_HEAD(&object->gray_list);
562         INIT_HLIST_HEAD(&object->area_list);
563         spin_lock_init(&object->lock);
564         atomic_set(&object->use_count, 1);
565         object->flags = OBJECT_ALLOCATED;
566         object->pointer = ptr;
567         object->size = size;
568         object->excess_ref = 0;
569         object->min_count = min_count;
570         object->count = 0;                      /* white color initially */
571         object->jiffies = jiffies;
572         object->checksum = 0;
573
574         /* task information */
575         if (in_irq()) {
576                 object->pid = 0;
577                 strncpy(object->comm, "hardirq", sizeof(object->comm));
578         } else if (in_serving_softirq()) {
579                 object->pid = 0;
580                 strncpy(object->comm, "softirq", sizeof(object->comm));
581         } else {
582                 object->pid = current->pid;
583                 /*
584                  * There is a small chance of a race with set_task_comm(),
585                  * however using get_task_comm() here may cause locking
586                  * dependency issues with current->alloc_lock. In the worst
587                  * case, the command line is not correct.
588                  */
589                 strncpy(object->comm, current->comm, sizeof(object->comm));
590         }
591
592         /* kernel backtrace */
593         object->trace_len = __save_stack_trace(object->trace);
594
595         write_lock_irqsave(&kmemleak_lock, flags);
596
597         untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
598         min_addr = min(min_addr, untagged_ptr);
599         max_addr = max(max_addr, untagged_ptr + size);
600         link = &object_tree_root.rb_node;
601         rb_parent = NULL;
602         while (*link) {
603                 rb_parent = *link;
604                 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
605                 if (ptr + size <= parent->pointer)
606                         link = &parent->rb_node.rb_left;
607                 else if (parent->pointer + parent->size <= ptr)
608                         link = &parent->rb_node.rb_right;
609                 else {
610                         kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
611                                       ptr);
612                         /*
613                          * No need for parent->lock here since "parent" cannot
614                          * be freed while the kmemleak_lock is held.
615                          */
616                         dump_object_info(parent);
617                         kmem_cache_free(object_cache, object);
618                         object = NULL;
619                         goto out;
620                 }
621         }
622         rb_link_node(&object->rb_node, rb_parent, link);
623         rb_insert_color(&object->rb_node, &object_tree_root);
624
625         list_add_tail_rcu(&object->object_list, &object_list);
626 out:
627         write_unlock_irqrestore(&kmemleak_lock, flags);
628         return object;
629 }
630
631 /*
632  * Mark the object as not allocated and schedule RCU freeing via put_object().
633  */
634 static void __delete_object(struct kmemleak_object *object)
635 {
636         unsigned long flags;
637
638         WARN_ON(!(object->flags & OBJECT_ALLOCATED));
639         WARN_ON(atomic_read(&object->use_count) < 1);
640
641         /*
642          * Locking here also ensures that the corresponding memory block
643          * cannot be freed when it is being scanned.
644          */
645         spin_lock_irqsave(&object->lock, flags);
646         object->flags &= ~OBJECT_ALLOCATED;
647         spin_unlock_irqrestore(&object->lock, flags);
648         put_object(object);
649 }
650
651 /*
652  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
653  * delete it.
654  */
655 static void delete_object_full(unsigned long ptr)
656 {
657         struct kmemleak_object *object;
658
659         object = find_and_remove_object(ptr, 0);
660         if (!object) {
661 #ifdef DEBUG
662                 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
663                               ptr);
664 #endif
665                 return;
666         }
667         __delete_object(object);
668 }
669
670 /*
671  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
672  * delete it. If the memory block is partially freed, the function may create
673  * additional metadata for the remaining parts of the block.
674  */
675 static void delete_object_part(unsigned long ptr, size_t size)
676 {
677         struct kmemleak_object *object;
678         unsigned long start, end;
679
680         object = find_and_remove_object(ptr, 1);
681         if (!object) {
682 #ifdef DEBUG
683                 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
684                               ptr, size);
685 #endif
686                 return;
687         }
688
689         /*
690          * Create one or two objects that may result from the memory block
691          * split. Note that partial freeing is only done by free_bootmem() and
692          * this happens before kmemleak_init() is called. The path below is
693          * only executed during early log recording in kmemleak_init(), so
694          * GFP_KERNEL is enough.
695          */
696         start = object->pointer;
697         end = object->pointer + object->size;
698         if (ptr > start)
699                 create_object(start, ptr - start, object->min_count,
700                               GFP_KERNEL);
701         if (ptr + size < end)
702                 create_object(ptr + size, end - ptr - size, object->min_count,
703                               GFP_KERNEL);
704
705         __delete_object(object);
706 }
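
/*
 * Worked example (illustrative): for an object covering [0x1000, 0x1100),
 * delete_object_part(0x1040, 0x20) deletes the original metadata and
 * re-creates it for the two surviving ranges, [0x1000, 0x1040) and
 * [0x1060, 0x1100), preserving the old min_count.
 */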
707
708 static void __paint_it(struct kmemleak_object *object, int color)
709 {
710         object->min_count = color;
711         if (color == KMEMLEAK_BLACK)
712                 object->flags |= OBJECT_NO_SCAN;
713 }
714
715 static void paint_it(struct kmemleak_object *object, int color)
716 {
717         unsigned long flags;
718
719         spin_lock_irqsave(&object->lock, flags);
720         __paint_it(object, color);
721         spin_unlock_irqrestore(&object->lock, flags);
722 }
723
724 static void paint_ptr(unsigned long ptr, int color)
725 {
726         struct kmemleak_object *object;
727
728         object = find_and_get_object(ptr, 0);
729         if (!object) {
730                 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
731                               ptr,
732                               (color == KMEMLEAK_GREY) ? "Grey" :
733                               (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
734                 return;
735         }
736         paint_it(object, color);
737         put_object(object);
738 }
739
740 /*
741  * Mark an object permanently as gray-colored so that it can no longer be
742  * reported as a leak. This is used in general to mark a false positive.
743  */
744 static void make_gray_object(unsigned long ptr)
745 {
746         paint_ptr(ptr, KMEMLEAK_GREY);
747 }
748
749 /*
750  * Mark the object as black-colored so that it is ignored from scans and
751  * reporting.
752  */
753 static void make_black_object(unsigned long ptr)
754 {
755         paint_ptr(ptr, KMEMLEAK_BLACK);
756 }
757
758 /*
759  * Add a scanning area to the object. If at least one such area is added,
760  * kmemleak will only scan these ranges rather than the whole memory block.
761  */
762 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
763 {
764         unsigned long flags;
765         struct kmemleak_object *object;
766         struct kmemleak_scan_area *area;
767
768         object = find_and_get_object(ptr, 1);
769         if (!object) {
770                 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
771                               ptr);
772                 return;
773         }
774
775         area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
776         if (!area) {
777                 pr_warn("Cannot allocate a scan area\n");
778                 goto out;
779         }
780
781         spin_lock_irqsave(&object->lock, flags);
782         if (size == SIZE_MAX) {
783                 size = object->pointer + object->size - ptr;
784         } else if (ptr + size > object->pointer + object->size) {
785                 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
786                 dump_object_info(object);
787                 kmem_cache_free(scan_area_cache, area);
788                 goto out_unlock;
789         }
790
791         INIT_HLIST_NODE(&area->node);
792         area->start = ptr;
793         area->size = size;
794
795         hlist_add_head(&area->node, &object->area_list);
796 out_unlock:
797         spin_unlock_irqrestore(&object->lock, flags);
798 out:
799         put_object(object);
800 }
801
802 /*
803  * Any surplus references (object already gray) to 'ptr' are passed to
804  * 'excess_ref'. This is used in the vmalloc() case where a pointer to
805  * vm_struct may be used as an alternative reference to the vmalloc'ed object
806  * (see free_thread_stack()).
807  */
808 static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
809 {
810         unsigned long flags;
811         struct kmemleak_object *object;
812
813         object = find_and_get_object(ptr, 0);
814         if (!object) {
815                 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
816                               ptr);
817                 return;
818         }
819
820         spin_lock_irqsave(&object->lock, flags);
821         object->excess_ref = excess_ref;
822         spin_unlock_irqrestore(&object->lock, flags);
823         put_object(object);
824 }
825
826 /*
827  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
828  * pointer. Such an object will not be scanned by kmemleak but references to it
829  * are searched.
830  */
831 static void object_no_scan(unsigned long ptr)
832 {
833         unsigned long flags;
834         struct kmemleak_object *object;
835
836         object = find_and_get_object(ptr, 0);
837         if (!object) {
838                 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
839                 return;
840         }
841
842         spin_lock_irqsave(&object->lock, flags);
843         object->flags |= OBJECT_NO_SCAN;
844         spin_unlock_irqrestore(&object->lock, flags);
845         put_object(object);
846 }
847
848 /*
849  * Log an early kmemleak_* call to the early_log buffer. These calls will be
850  * processed later once kmemleak is fully initialized.
851  */
852 static void __init log_early(int op_type, const void *ptr, size_t size,
853                              int min_count)
854 {
855         unsigned long flags;
856         struct early_log *log;
857
858         if (kmemleak_error) {
859                 /* kmemleak stopped recording, just count the requests */
860                 crt_early_log++;
861                 return;
862         }
863
864         if (crt_early_log >= ARRAY_SIZE(early_log)) {
865                 crt_early_log++;
866                 kmemleak_disable();
867                 return;
868         }
869
870         /*
871          * There is no need for locking since the kernel is still in UP mode
872          * at this stage. Disabling the IRQs is enough.
873          */
874         local_irq_save(flags);
875         log = &early_log[crt_early_log];
876         log->op_type = op_type;
877         log->ptr = ptr;
878         log->size = size;
879         log->min_count = min_count;
880         log->trace_len = __save_stack_trace(log->trace);
881         crt_early_log++;
882         local_irq_restore(flags);
883 }
884
885 /*
886  * Log an early allocated block and populate the stack trace.
887  */
888 static void early_alloc(struct early_log *log)
889 {
890         struct kmemleak_object *object;
891         unsigned long flags;
892         int i;
893
894         if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
895                 return;
896
897         /*
898          * RCU locking needed to ensure object is not freed via put_object().
899          */
900         rcu_read_lock();
901         object = create_object((unsigned long)log->ptr, log->size,
902                                log->min_count, GFP_ATOMIC);
903         if (!object)
904                 goto out;
905         spin_lock_irqsave(&object->lock, flags);
906         for (i = 0; i < log->trace_len; i++)
907                 object->trace[i] = log->trace[i];
908         object->trace_len = log->trace_len;
909         spin_unlock_irqrestore(&object->lock, flags);
910 out:
911         rcu_read_unlock();
912 }
913
914 /*
915  * Log an early allocated percpu block and populate the stack trace for each CPU.
916  */
917 static void early_alloc_percpu(struct early_log *log)
918 {
919         unsigned int cpu;
920         const void __percpu *ptr = log->ptr;
921
922         for_each_possible_cpu(cpu) {
923                 log->ptr = per_cpu_ptr(ptr, cpu);
924                 early_alloc(log);
925         }
926 }
927
928 /**
929  * kmemleak_alloc - register a newly allocated object
930  * @ptr:        pointer to beginning of the object
931  * @size:       size of the object
932  * @min_count:  minimum number of references to this object. If during memory
933  *              scanning a number of references less than @min_count is found,
934  *              the object is reported as a memory leak. If @min_count is 0,
935  *              the object is never reported as a leak. If @min_count is -1,
936  *              the object is ignored (not scanned and not reported as a leak)
937  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
938  *
939  * This function is called from the kernel allocators when a new object
940  * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
941  */
942 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
943                           gfp_t gfp)
944 {
945         pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
946
947         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
948                 create_object((unsigned long)ptr, size, min_count, gfp);
949         else if (kmemleak_early_log)
950                 log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
951 }
952 EXPORT_SYMBOL_GPL(kmemleak_alloc);
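
/*
 * Usage sketch (hypothetical driver code, not from this file): a custom
 * allocator carving objects out of a private pool can pair the callbacks
 * so that kmemleak tracks each sub-allocation:
 *
 *      void *my_pool_alloc(size_t size)
 *      {
 *              void *ptr = carve_from_pool(size);      <- hypothetical helper
 *
 *              if (ptr)
 *                      kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *              return ptr;
 *      }
 *
 * with a matching kmemleak_free() in the pool's free path. min_count == 1
 * means the block is reported unless at least one reference is found.
 */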
953
954 /**
955  * kmemleak_alloc_percpu - register a newly allocated __percpu object
956  * @ptr:        __percpu pointer to beginning of the object
957  * @size:       size of the object
958  * @gfp:        flags used for kmemleak internal memory allocations
959  *
960  * This function is called from the kernel percpu allocator when a new object
961  * (memory block) is allocated (alloc_percpu).
962  */
963 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
964                                  gfp_t gfp)
965 {
966         unsigned int cpu;
967
968         pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
969
970         /*
971          * Percpu allocations are only scanned and not reported as leaks
972          * (min_count is set to 0).
973          */
974         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
975                 for_each_possible_cpu(cpu)
976                         create_object((unsigned long)per_cpu_ptr(ptr, cpu),
977                                       size, 0, gfp);
978         else if (kmemleak_early_log)
979                 log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
980 }
981 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
982
983 /**
984  * kmemleak_vmalloc - register a newly vmalloc'ed object
985  * @area:       pointer to vm_struct
986  * @size:       size of the object
987  * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
988  *
989  * This function is called from the vmalloc() kernel allocator when a new
990  * object (memory block) is allocated.
991  */
992 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
993 {
994         pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
995
996         /*
997          * A min_count = 2 is needed because vm_struct contains a reference to
998          * the virtual address of the vmalloc'ed block.
999          */
1000         if (kmemleak_enabled) {
1001                 create_object((unsigned long)area->addr, size, 2, gfp);
1002                 object_set_excess_ref((unsigned long)area,
1003                                       (unsigned long)area->addr);
1004         } else if (kmemleak_early_log) {
1005                 log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
1006                 /* reusing early_log.size for storing area->addr */
1007                 log_early(KMEMLEAK_SET_EXCESS_REF,
1008                           area, (unsigned long)area->addr, 0);
1009         }
1010 }
1011 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1012
1013 /**
1014  * kmemleak_free - unregister a previously registered object
1015  * @ptr:        pointer to beginning of the object
1016  *
1017  * This function is called from the kernel allocators when an object (memory
1018  * block) is freed (kmem_cache_free, kfree, vfree etc.).
1019  */
1020 void __ref kmemleak_free(const void *ptr)
1021 {
1022         pr_debug("%s(0x%p)\n", __func__, ptr);
1023
1024         if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1025                 delete_object_full((unsigned long)ptr);
1026         else if (kmemleak_early_log)
1027                 log_early(KMEMLEAK_FREE, ptr, 0, 0);
1028 }
1029 EXPORT_SYMBOL_GPL(kmemleak_free);
1030
1031 /**
1032  * kmemleak_free_part - partially unregister a previously registered object
1033  * @ptr:        pointer to the beginning of or inside the object. This also
1034  *              represents the start of the range to be freed
1035  * @size:       size to be unregistered
1036  *
1037  * This function is called when only a part of a memory block is freed
1038  * (usually from the bootmem allocator).
1039  */
1040 void __ref kmemleak_free_part(const void *ptr, size_t size)
1041 {
1042         pr_debug("%s(0x%p)\n", __func__, ptr);
1043
1044         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1045                 delete_object_part((unsigned long)ptr, size);
1046         else if (kmemleak_early_log)
1047                 log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
1048 }
1049 EXPORT_SYMBOL_GPL(kmemleak_free_part);
1050
1051 /**
1052  * kmemleak_free_percpu - unregister a previously registered __percpu object
1053  * @ptr:        __percpu pointer to beginning of the object
1054  *
1055  * This function is called from the kernel percpu allocator when an object
1056  * (memory block) is freed (free_percpu).
1057  */
1058 void __ref kmemleak_free_percpu(const void __percpu *ptr)
1059 {
1060         unsigned int cpu;
1061
1062         pr_debug("%s(0x%p)\n", __func__, ptr);
1063
1064         if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1065                 for_each_possible_cpu(cpu)
1066                         delete_object_full((unsigned long)per_cpu_ptr(ptr,
1067                                                                       cpu));
1068         else if (kmemleak_early_log)
1069                 log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
1070 }
1071 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1072
1073 /**
1074  * kmemleak_update_trace - update object allocation stack trace
1075  * @ptr:        pointer to beginning of the object
1076  *
1077  * Override the object allocation stack trace for cases where the actual
1078  * allocation place is not always useful.
1079  */
1080 void __ref kmemleak_update_trace(const void *ptr)
1081 {
1082         struct kmemleak_object *object;
1083         unsigned long flags;
1084
1085         pr_debug("%s(0x%p)\n", __func__, ptr);
1086
1087         if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1088                 return;
1089
1090         object = find_and_get_object((unsigned long)ptr, 1);
1091         if (!object) {
1092 #ifdef DEBUG
1093                 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1094                               ptr);
1095 #endif
1096                 return;
1097         }
1098
1099         spin_lock_irqsave(&object->lock, flags);
1100         object->trace_len = __save_stack_trace(object->trace);
1101         spin_unlock_irqrestore(&object->lock, flags);
1102
1103         put_object(object);
1104 }
1105 EXPORT_SYMBOL(kmemleak_update_trace);
1106
1107 /**
1108  * kmemleak_not_leak - mark an allocated object as false positive
1109  * @ptr:        pointer to beginning of the object
1110  *
1111  * Calling this function on an object will cause the memory block to no longer
1112  * be reported as a leak and to always be scanned.
1113  */
1114 void __ref kmemleak_not_leak(const void *ptr)
1115 {
1116         pr_debug("%s(0x%p)\n", __func__, ptr);
1117
1118         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1119                 make_gray_object((unsigned long)ptr);
1120         else if (kmemleak_early_log)
1121                 log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1122 }
1123 EXPORT_SYMBOL(kmemleak_not_leak);
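
/*
 * Usage sketch (hypothetical): a buffer whose only remaining reference
 * lives outside scanned memory, e.g. a physical address programmed into a
 * device, would otherwise show up as a leak:
 *
 *      buf = kmalloc(len, GFP_KERNEL);
 *      program_device_dma(dev, virt_to_phys(buf));     <- hypothetical
 *      kmemleak_not_leak(buf);         <- known false positive
 */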
1124
1125 /**
1126  * kmemleak_ignore - ignore an allocated object
1127  * @ptr:        pointer to beginning of the object
1128  *
1129  * Calling this function on an object will cause the memory block to be
1130  * ignored (not scanned and not reported as a leak). This is usually done when
1131  * it is known that the corresponding block is not a leak and does not contain
1132  * any references to other allocated memory blocks.
1133  */
1134 void __ref kmemleak_ignore(const void *ptr)
1135 {
1136         pr_debug("%s(0x%p)\n", __func__, ptr);
1137
1138         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1139                 make_black_object((unsigned long)ptr);
1140         else if (kmemleak_early_log)
1141                 log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1142 }
1143 EXPORT_SYMBOL(kmemleak_ignore);
1144
1145 /**
1146  * kmemleak_scan_area - limit the range to be scanned in an allocated object
1147  * @ptr:        pointer to the beginning of or inside the object. This also
1148  *              represents the start of the scan area
1149  * @size:       size of the scan area
1150  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
1151  *
1152  * This function is used when it is known that only certain parts of an object
1153  * contain references to other objects. Kmemleak will only scan these areas,
1154  * reducing the number of false negatives.
1155  */
1156 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1157 {
1158         pr_debug("%s(0x%p)\n", __func__, ptr);
1159
1160         if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1161                 add_scan_area((unsigned long)ptr, size, gfp);
1162         else if (kmemleak_early_log)
1163                 log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1164 }
1165 EXPORT_SYMBOL(kmemleak_scan_area);
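
/*
 * Usage sketch (hypothetical): if only one field of a large object can
 * hold pointers, scanning may be restricted to that field:
 *
 *      struct my_obj {
 *              void *refs[4];          <- only pointer-bearing part
 *              u8 payload[4096];       <- opaque data
 *      };
 *
 *      obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *      kmemleak_scan_area(obj->refs, sizeof(obj->refs), GFP_KERNEL);
 *
 * Random payload bytes can then no longer be misread as references.
 */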
1166
1167 /**
1168  * kmemleak_no_scan - do not scan an allocated object
1169  * @ptr:        pointer to beginning of the object
1170  *
1171  * This function notifies kmemleak not to scan the given memory block. Useful
1172  * in situations where it is known that the given object does not contain any
1173  * references to other objects. Kmemleak will not scan such objects, reducing
1174  * the number of false negatives.
1175  */
1176 void __ref kmemleak_no_scan(const void *ptr)
1177 {
1178         pr_debug("%s(0x%p)\n", __func__, ptr);
1179
1180         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1181                 object_no_scan((unsigned long)ptr);
1182         else if (kmemleak_early_log)
1183                 log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1184 }
1185 EXPORT_SYMBOL(kmemleak_no_scan);
1186
1187 /**
1188  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1189  *                       address argument
1190  * @phys:       physical address of the object
1191  * @size:       size of the object
1192  * @min_count:  minimum number of references to this object.
1193  *              See kmemleak_alloc()
1194  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
1195  */
1196 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1197                                gfp_t gfp)
1198 {
1199         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1200                 kmemleak_alloc(__va(phys), size, min_count, gfp);
1201 }
1202 EXPORT_SYMBOL(kmemleak_alloc_phys);
1203
1204 /**
1205  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1206  *                           physical address argument
1207  * @phys:       physical address of the beginning of or inside an object. This
1208  *              also represents the start of the range to be freed
1209  * @size:       size to be unregistered
1210  */
1211 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1212 {
1213         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1214                 kmemleak_free_part(__va(phys), size);
1215 }
1216 EXPORT_SYMBOL(kmemleak_free_part_phys);
1217
1218 /**
1219  * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1220  *                          address argument
1221  * @phys:       physical address of the object
1222  */
1223 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1224 {
1225         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1226                 kmemleak_not_leak(__va(phys));
1227 }
1228 EXPORT_SYMBOL(kmemleak_not_leak_phys);
1229
1230 /**
1231  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1232  *                        address argument
1233  * @phys:       physical address of the object
1234  */
1235 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1236 {
1237         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1238                 kmemleak_ignore(__va(phys));
1239 }
1240 EXPORT_SYMBOL(kmemleak_ignore_phys);
1241
1242 /*
1243  * Update an object's checksum and return true if it was modified.
1244  */
1245 static bool update_checksum(struct kmemleak_object *object)
1246 {
1247         u32 old_csum = object->checksum;
1248
1249         kasan_disable_current();
1250         object->checksum = crc32(0, (void *)object->pointer, object->size);
1251         kasan_enable_current();
1252
1253         return object->checksum != old_csum;
1254 }
1255
1256 /*
1257  * Update an object's references. object->lock must be held by the caller.
1258  */
1259 static void update_refs(struct kmemleak_object *object)
1260 {
1261         if (!color_white(object)) {
1262                 /* non-orphan, ignored or new */
1263                 return;
1264         }
1265
1266         /*
1267          * Increase the object's reference count (number of pointers to the
1268          * memory block). If this count reaches the required minimum, the
1269          * object's color will become gray and it will be added to the
1270          * gray_list.
1271          */
1272         object->count++;
1273         if (color_gray(object)) {
1274                 /* put_object() called when removing from gray_list */
1275                 WARN_ON(!get_object(object));
1276                 list_add_tail(&object->gray_list, &gray_list);
1277         }
1278 }
1279
1280 /*
1281  * Memory scanning is a long process and it needs to be interruptible. This
1282  * function checks whether such an interrupt condition has occurred.
1283  */
1284 static int scan_should_stop(void)
1285 {
1286         if (!kmemleak_enabled)
1287                 return 1;
1288
1289         /*
1290          * This function may be called from either process or kthread context,
1291          * hence the need to check for both stop conditions.
1292          */
1293         if (current->mm)
1294                 return signal_pending(current);
1295         else
1296                 return kthread_should_stop();
1297
1298         return 0;
1299 }
1300
1301 /*
1302  * Scan a memory block (exclusive range) for valid pointers and add those
1303  * found to the gray list.
1304  */
1305 static void scan_block(void *_start, void *_end,
1306                        struct kmemleak_object *scanned)
1307 {
1308         unsigned long *ptr;
1309         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1310         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1311         unsigned long flags;
1312         unsigned long untagged_ptr;
1313
1314         read_lock_irqsave(&kmemleak_lock, flags);
1315         for (ptr = start; ptr < end; ptr++) {
1316                 struct kmemleak_object *object;
1317                 unsigned long pointer;
1318                 unsigned long excess_ref;
1319
1320                 if (scan_should_stop())
1321                         break;
1322
1323                 kasan_disable_current();
1324                 pointer = *ptr;
1325                 kasan_enable_current();
1326
1327                 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1328                 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1329                         continue;
1330
1331                 /*
1332                  * No need for get_object() here since we hold kmemleak_lock.
1333                  * object->use_count cannot be dropped to 0 while the object
1334                  * is still present in object_tree_root and object_list
1335                  * (with updates protected by kmemleak_lock).
1336                  */
1337                 object = lookup_object(pointer, 1);
1338                 if (!object)
1339                         continue;
1340                 if (object == scanned)
1341                         /* self referenced, ignore */
1342                         continue;
1343
1344                 /*
1345                  * Avoid the lockdep recursive warning on object->lock being
1346                  * previously acquired in scan_object(). These locks are
1347                  * enclosed by scan_mutex.
1348                  */
1349                 spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1350                 /* only pass surplus references (object already gray) */
1351                 if (color_gray(object)) {
1352                         excess_ref = object->excess_ref;
1353                         /* no need for update_refs() if object already gray */
1354                 } else {
1355                         excess_ref = 0;
1356                         update_refs(object);
1357                 }
1358                 spin_unlock(&object->lock);
1359
1360                 if (excess_ref) {
1361                         object = lookup_object(excess_ref, 0);
1362                         if (!object)
1363                                 continue;
1364                         if (object == scanned)
1365                                 /* circular reference, ignore */
1366                                 continue;
1367                         spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1368                         update_refs(object);
1369                         spin_unlock(&object->lock);
1370                 }
1371         }
1372         read_unlock_irqrestore(&kmemleak_lock, flags);
1373 }
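
/*
 * Illustration of the lookup above: if the scanned block contains a word
 * whose value lands inside a tracked object, e.g.
 *
 *      value read via *ptr:    0xffff888012345678
 *      existing object:        pointer == 0xffff888012345670, size == 0x40
 *
 * then lookup_object(pointer, 1) matches it (interior pointers are
 * accepted with alias == 1) and update_refs() increments its count,
 * possibly turning the object gray.
 */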
1374
1375 /*
1376  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1377  */
1378 #ifdef CONFIG_SMP
1379 static void scan_large_block(void *start, void *end)
1380 {
1381         void *next;
1382
1383         while (start < end) {
1384                 next = min(start + MAX_SCAN_SIZE, end);
1385                 scan_block(start, next, NULL);
1386                 start = next;
1387                 cond_resched();
1388         }
1389 }
1390 #endif
1391
1392 /*
1393  * Scan a memory block corresponding to a kmemleak_object. A precondition is
1394  * that object->use_count >= 1.
1395  */
1396 static void scan_object(struct kmemleak_object *object)
1397 {
1398         struct kmemleak_scan_area *area;
1399         unsigned long flags;
1400
1401         /*
1402          * Once the object->lock is acquired, the corresponding memory block
1403          * cannot be freed (the same lock is acquired in delete_object).
1404          */
1405         spin_lock_irqsave(&object->lock, flags);
1406         if (object->flags & OBJECT_NO_SCAN)
1407                 goto out;
1408         if (!(object->flags & OBJECT_ALLOCATED))
1409                 /* already freed object */
1410                 goto out;
1411         if (hlist_empty(&object->area_list)) {
1412                 void *start = (void *)object->pointer;
1413                 void *end = (void *)(object->pointer + object->size);
1414                 void *next;
1415
1416                 do {
1417                         next = min(start + MAX_SCAN_SIZE, end);
1418                         scan_block(start, next, object);
1419
1420                         start = next;
1421                         if (start >= end)
1422                                 break;
1423
1424                         spin_unlock_irqrestore(&object->lock, flags);
1425                         cond_resched();
1426                         spin_lock_irqsave(&object->lock, flags);
1427                 } while (object->flags & OBJECT_ALLOCATED);
1428         } else
1429                 hlist_for_each_entry(area, &object->area_list, node)
1430                         scan_block((void *)area->start,
1431                                    (void *)(area->start + area->size),
1432                                    object);
1433 out:
1434         spin_unlock_irqrestore(&object->lock, flags);
1435 }
1436
/*
 * Scan the objects already referenced (gray objects). As scanning proceeds,
 * newly referenced objects are appended to the list; if there are no memory
 * leaks, every object is eventually scanned.
 */
1441 static void scan_gray_list(void)
1442 {
1443         struct kmemleak_object *object, *tmp;
1444
1445         /*
1446          * The list traversal is safe for both tail additions and removals
1447          * from inside the loop. The kmemleak objects cannot be freed from
1448          * outside the loop because their use_count was incremented.
1449          */
1450         object = list_entry(gray_list.next, typeof(*object), gray_list);
1451         while (&object->gray_list != &gray_list) {
1452                 cond_resched();
1453
1454                 /* may add new objects to the list */
1455                 if (!scan_should_stop())
1456                         scan_object(object);
1457
1458                 tmp = list_entry(object->gray_list.next, typeof(*object),
1459                                  gray_list);
1460
1461                 /* remove the object from the list and release it */
1462                 list_del(&object->gray_list);
1463                 put_object(object);
1464
1465                 object = tmp;
1466         }
1467         WARN_ON(!list_empty(&gray_list));
1468 }
1469
1470 /*
1471  * Scan data sections and all the referenced memory blocks allocated via the
1472  * kernel's standard allocators. This function must be called with the
1473  * scan_mutex held.
1474  */
1475 static void kmemleak_scan(void)
1476 {
1477         unsigned long flags;
1478         struct kmemleak_object *object;
1479         int i;
1480         int new_leaks = 0;
1481
1482         jiffies_last_scan = jiffies;
1483
        /* prepare the kmemleak_objects */
1485         rcu_read_lock();
1486         list_for_each_entry_rcu(object, &object_list, object_list) {
1487                 spin_lock_irqsave(&object->lock, flags);
1488 #ifdef DEBUG
1489                 /*
1490                  * With a few exceptions there should be a maximum of
1491                  * 1 reference to any object at this point.
1492                  */
1493                 if (atomic_read(&object->use_count) > 1) {
1494                         pr_debug("object->use_count = %d\n",
1495                                  atomic_read(&object->use_count));
1496                         dump_object_info(object);
1497                 }
1498 #endif
1499                 /* reset the reference count (whiten the object) */
1500                 object->count = 0;
1501                 if (color_gray(object) && get_object(object))
1502                         list_add_tail(&object->gray_list, &gray_list);
1503
1504                 spin_unlock_irqrestore(&object->lock, flags);
1505         }
1506         rcu_read_unlock();
1507
1508 #ifdef CONFIG_SMP
1509         /* per-cpu sections scanning */
1510         for_each_possible_cpu(i)
1511                 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1512                                  __per_cpu_end + per_cpu_offset(i));
1513 #endif
1514
        /*
         * Struct page scanning for each node. get_online_mems() blocks
         * memory hotplug while the pfn walk below is in progress.
         */
1518         get_online_mems();
1519         for_each_online_node(i) {
1520                 unsigned long start_pfn = node_start_pfn(i);
1521                 unsigned long end_pfn = node_end_pfn(i);
1522                 unsigned long pfn;
1523
1524                 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1525                         struct page *page = pfn_to_online_page(pfn);
1526
1527                         if (!page)
1528                                 continue;
1529
1530                         /* only scan pages belonging to this node */
1531                         if (page_to_nid(page) != i)
1532                                 continue;
1533                         /* only scan if page is in use */
1534                         if (page_count(page) == 0)
1535                                 continue;
1536                         scan_block(page, page + 1, NULL);
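                        /* reschedule once every 64 pages to bound scan latency */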
1537                         if (!(pfn & 63))
1538                                 cond_resched();
1539                 }
1540         }
1541         put_online_mems();
1542
        /*
         * Scanning the task stacks (may introduce false negatives: stale
         * pointers left on a stack make otherwise-leaked objects appear
         * referenced).
         */
1546         if (kmemleak_stack_scan) {
1547                 struct task_struct *p, *g;
1548
1549                 read_lock(&tasklist_lock);
1550                 do_each_thread(g, p) {
1551                         void *stack = try_get_task_stack(p);
1552                         if (stack) {
1553                                 scan_block(stack, stack + THREAD_SIZE, NULL);
1554                                 put_task_stack(p);
1555                         }
1556                 } while_each_thread(g, p);
1557                 read_unlock(&tasklist_lock);
1558         }
1559
1560         /*
1561          * Scan the objects already referenced from the sections scanned
1562          * above.
1563          */
1564         scan_gray_list();
1565
        /*
         * Check for new or unreferenced objects modified since the previous
         * scan and color them gray until the next scan. update_checksum()
         * detects such modifications, so the object gets one more scan pass
         * before it can be reported.
         */
1570         rcu_read_lock();
1571         list_for_each_entry_rcu(object, &object_list, object_list) {
1572                 spin_lock_irqsave(&object->lock, flags);
1573                 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1574                     && update_checksum(object) && get_object(object)) {
1575                         /* color it gray temporarily */
1576                         object->count = object->min_count;
1577                         list_add_tail(&object->gray_list, &gray_list);
1578                 }
1579                 spin_unlock_irqrestore(&object->lock, flags);
1580         }
1581         rcu_read_unlock();
1582
1583         /*
1584          * Re-scan the gray list for modified unreferenced objects.
1585          */
1586         scan_gray_list();
1587
1588         /*
1589          * If scanning was stopped do not report any new unreferenced objects.
1590          */
1591         if (scan_should_stop())
1592                 return;
1593
1594         /*
1595          * Scanning result reporting.
1596          */
1597         rcu_read_lock();
1598         list_for_each_entry_rcu(object, &object_list, object_list) {
1599                 spin_lock_irqsave(&object->lock, flags);
1600                 if (unreferenced_object(object) &&
1601                     !(object->flags & OBJECT_REPORTED)) {
1602                         object->flags |= OBJECT_REPORTED;
1603
1604                         if (kmemleak_verbose)
1605                                 print_unreferenced(NULL, object);
1606
1607                         new_leaks++;
1608                 }
1609                 spin_unlock_irqrestore(&object->lock, flags);
1610         }
1611         rcu_read_unlock();
1612
1613         if (new_leaks) {
1614                 kmemleak_found_leaks = true;
1615
1616                 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1617                         new_leaks);
1618         }
1619
1620 }
1621
/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * remaining at the end of a memory scan are reported, each only once.
 */
1626 static int kmemleak_scan_thread(void *arg)
1627 {
1628         static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1629
1630         pr_info("Automatic memory scanning thread started\n");
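        /* run at a lower priority so scanning does not disturb normal work */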
1631         set_user_nice(current, 10);
1632
1633         /*
1634          * Wait before the first scan to allow the system to fully initialize.
1635          */
1636         if (first_run) {
1637                 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1638                 first_run = 0;
1639                 while (timeout && !kthread_should_stop())
1640                         timeout = schedule_timeout_interruptible(timeout);
1641         }
1642
1643         while (!kthread_should_stop()) {
1644                 signed long timeout = jiffies_scan_wait;
1645
1646                 mutex_lock(&scan_mutex);
1647                 kmemleak_scan();
1648                 mutex_unlock(&scan_mutex);
1649
1650                 /* wait before the next scan */
1651                 while (timeout && !kthread_should_stop())
1652                         timeout = schedule_timeout_interruptible(timeout);
1653         }
1654
1655         pr_info("Automatic memory scanning thread ended\n");
1656
1657         return 0;
1658 }
1659
1660 /*
1661  * Start the automatic memory scanning thread. This function must be called
1662  * with the scan_mutex held.
1663  */
1664 static void start_scan_thread(void)
1665 {
1666         if (scan_thread)
1667                 return;
1668         scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1669         if (IS_ERR(scan_thread)) {
1670                 pr_warn("Failed to create the scan thread\n");
1671                 scan_thread = NULL;
1672         }
1673 }
1674
1675 /*
1676  * Stop the automatic memory scanning thread.
1677  */
1678 static void stop_scan_thread(void)
1679 {
1680         if (scan_thread) {
1681                 kthread_stop(scan_thread);
1682                 scan_thread = NULL;
1683         }
1684 }
1685
/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Both scan_mutex and
 * rcu_read_lock are acquired here and only released in kmemleak_seq_stop().
 */
1691 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1692 {
1693         struct kmemleak_object *object;
1694         loff_t n = *pos;
1695         int err;
1696
1697         err = mutex_lock_interruptible(&scan_mutex);
1698         if (err < 0)
1699                 return ERR_PTR(err);
1700
1701         rcu_read_lock();
1702         list_for_each_entry_rcu(object, &object_list, object_list) {
1703                 if (n-- > 0)
1704                         continue;
1705                 if (get_object(object))
1706                         goto out;
1707         }
1708         object = NULL;
1709 out:
1710         return object;
1711 }
1712
/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increments that of the next one.
 */
1717 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1718 {
1719         struct kmemleak_object *prev_obj = v;
1720         struct kmemleak_object *next_obj = NULL;
1721         struct kmemleak_object *obj = prev_obj;
1722
1723         ++(*pos);
1724
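        /*
         * rcu_read_lock() is still held at this point: it was acquired in
         * kmemleak_seq_start() and is only released in kmemleak_seq_stop().
         */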
1725         list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1726                 if (get_object(obj)) {
1727                         next_obj = obj;
1728                         break;
1729                 }
1730         }
1731
1732         put_object(prev_obj);
1733         return next_obj;
1734 }
1735
1736 /*
1737  * Decrement the use_count of the last object required, if any.
1738  */
1739 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1740 {
1741         if (!IS_ERR(v)) {
1742                 /*
1743                  * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1744                  * waiting was interrupted, so only release it if !IS_ERR.
1745                  */
1746                 rcu_read_unlock();
1747                 mutex_unlock(&scan_mutex);
1748                 if (v)
1749                         put_object(v);
1750         }
1751 }
1752
/*
 * Print the information for an unreferenced object to the seq file. Only
 * objects already reported by a scan (OBJECT_REPORTED) and still unreferenced
 * are printed.
 */
1756 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1757 {
1758         struct kmemleak_object *object = v;
1759         unsigned long flags;
1760
1761         spin_lock_irqsave(&object->lock, flags);
1762         if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1763                 print_unreferenced(seq, object);
1764         spin_unlock_irqrestore(&object->lock, flags);
1765         return 0;
1766 }
1767
1768 static const struct seq_operations kmemleak_seq_ops = {
1769         .start = kmemleak_seq_start,
1770         .next  = kmemleak_seq_next,
1771         .stop  = kmemleak_seq_stop,
1772         .show  = kmemleak_seq_show,
1773 };
1774
1775 static int kmemleak_open(struct inode *inode, struct file *file)
1776 {
1777         return seq_open(file, &kmemleak_seq_ops);
1778 }
1779
1780 static int dump_str_object_info(const char *str)
1781 {
1782         unsigned long flags;
1783         struct kmemleak_object *object;
1784         unsigned long addr;
1785
1786         if (kstrtoul(str, 0, &addr))
1787                 return -EINVAL;
1788         object = find_and_get_object(addr, 0);
1789         if (!object) {
1790                 pr_info("Unknown object at 0x%08lx\n", addr);
1791                 return -EINVAL;
1792         }
1793
1794         spin_lock_irqsave(&object->lock, flags);
1795         dump_object_info(object);
1796         spin_unlock_irqrestore(&object->lock, flags);
1797
1798         put_object(object);
1799         return 0;
1800 }
1801
/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we painted them black, they would never be scanned again, so
 * references they later gain to newly allocated objects would be missed and
 * we'd end up with false positives.
 */
1808 static void kmemleak_clear(void)
1809 {
1810         struct kmemleak_object *object;
1811         unsigned long flags;
1812
1813         rcu_read_lock();
1814         list_for_each_entry_rcu(object, &object_list, object_list) {
1815                 spin_lock_irqsave(&object->lock, flags);
1816                 if ((object->flags & OBJECT_REPORTED) &&
1817                     unreferenced_object(object))
1818                         __paint_it(object, KMEMLEAK_GREY);
1819                 spin_unlock_irqrestore(&object->lock, flags);
1820         }
1821         rcu_read_unlock();
1822
1823         kmemleak_found_leaks = false;
1824 }
1825
1826 static void __kmemleak_do_cleanup(void);
1827
1828 /*
1829  * File write operation to configure kmemleak at run-time. The following
1830  * commands can be written to the /sys/kernel/debug/kmemleak file:
1831  *   off        - disable kmemleak (irreversible)
 *   stack=on   - enable task stack scanning
 *   stack=off  - disable task stack scanning
1834  *   scan=on    - start the automatic memory scanning thread
1835  *   scan=off   - stop the automatic memory scanning thread
1836  *   scan=...   - set the automatic memory scanning period in seconds (0 to
1837  *                disable it)
1838  *   scan       - trigger a memory scan
1839  *   clear      - mark all current reported unreferenced kmemleak objects as
1840  *                grey to ignore printing them, or free all kmemleak objects
1841  *                if kmemleak has been disabled.
1842  *   dump=...   - dump information about the object found at the given address
1843  */
1844 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1845                               size_t size, loff_t *ppos)
1846 {
1847         char buf[64];
1848         int buf_size;
1849         int ret;
1850
1851         buf_size = min(size, (sizeof(buf) - 1));
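        /*
         * strncpy_from_user() does not NUL-terminate the destination if the
         * source string fills it completely, hence the explicit terminator
         * below.
         */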
1852         if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1853                 return -EFAULT;
1854         buf[buf_size] = 0;
1855
1856         ret = mutex_lock_interruptible(&scan_mutex);
1857         if (ret < 0)
1858                 return ret;
1859
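        /*
         * "clear" is handled before the kmemleak_enabled check so that the
         * internal metadata can still be freed after kmemleak has been
         * disabled.
         */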
1860         if (strncmp(buf, "clear", 5) == 0) {
1861                 if (kmemleak_enabled)
1862                         kmemleak_clear();
1863                 else
1864                         __kmemleak_do_cleanup();
1865                 goto out;
1866         }
1867
1868         if (!kmemleak_enabled) {
1869                 ret = -EPERM;
1870                 goto out;
1871         }
1872
1873         if (strncmp(buf, "off", 3) == 0)
1874                 kmemleak_disable();
1875         else if (strncmp(buf, "stack=on", 8) == 0)
1876                 kmemleak_stack_scan = 1;
1877         else if (strncmp(buf, "stack=off", 9) == 0)
1878                 kmemleak_stack_scan = 0;
1879         else if (strncmp(buf, "scan=on", 7) == 0)
1880                 start_scan_thread();
1881         else if (strncmp(buf, "scan=off", 8) == 0)
1882                 stop_scan_thread();
1883         else if (strncmp(buf, "scan=", 5) == 0) {
1884                 unsigned long secs;
1885
1886                 ret = kstrtoul(buf + 5, 0, &secs);
1887                 if (ret < 0)
1888                         goto out;
1889                 stop_scan_thread();
1890                 if (secs) {
1891                         jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1892                         start_scan_thread();
1893                 }
1894         } else if (strncmp(buf, "scan", 4) == 0)
1895                 kmemleak_scan();
1896         else if (strncmp(buf, "dump=", 5) == 0)
1897                 ret = dump_str_object_info(buf + 5);
1898         else
1899                 ret = -EINVAL;
1900
1901 out:
1902         mutex_unlock(&scan_mutex);
1903         if (ret < 0)
1904                 return ret;
1905
1906         /* ignore the rest of the buffer, only one command at a time */
1907         *ppos += size;
1908         return size;
1909 }
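
/*
 * Example usage from user space, following Documentation/dev-tools/kmemleak.rst
 * (the debugfs path assumes the default mount point):
 *
 *   mount -t debugfs nodev /sys/kernel/debug/
 *   echo scan > /sys/kernel/debug/kmemleak      # trigger an immediate scan
 *   cat /sys/kernel/debug/kmemleak              # list suspected leaks
 *   echo clear > /sys/kernel/debug/kmemleak     # mute the current reports
 */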
1910
1911 static const struct file_operations kmemleak_fops = {
1912         .owner          = THIS_MODULE,
1913         .open           = kmemleak_open,
1914         .read           = seq_read,
1915         .write          = kmemleak_write,
1916         .llseek         = seq_lseek,
1917         .release        = seq_release,
1918 };
1919
1920 static void __kmemleak_do_cleanup(void)
1921 {
1922         struct kmemleak_object *object;
1923
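        /*
         * Deleting the current entry while iterating is safe: list_del_rcu()
         * leaves the removed entry's forward pointer intact for readers
         * inside the RCU read-side critical section.
         */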
1924         rcu_read_lock();
1925         list_for_each_entry_rcu(object, &object_list, object_list)
1926                 delete_object_full(object->pointer);
1927         rcu_read_unlock();
1928 }
1929
/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no leaks were found (otherwise, kmemleak keeps the metadata around so the
 * recorded information on memory leaks is still accessible).
 */
1935 static void kmemleak_do_cleanup(struct work_struct *work)
1936 {
1937         stop_scan_thread();
1938
1939         mutex_lock(&scan_mutex);
        /*
         * Once the scan thread is known to have stopped, it is safe to stop
         * tracking object freeing. The ordering between the scan thread
         * stopping and the memory accesses below is guaranteed by
         * kthread_stop().
         */
1946         kmemleak_free_enabled = 0;
1947         mutex_unlock(&scan_mutex);
1948
1949         if (!kmemleak_found_leaks)
1950                 __kmemleak_do_cleanup();
1951         else
1952                 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1953 }
1954
1955 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1956
1957 /*
1958  * Disable kmemleak. No memory allocation/freeing will be traced once this
1959  * function is called. Disabling kmemleak is an irreversible operation.
1960  */
1961 static void kmemleak_disable(void)
1962 {
1963         /* atomically check whether it was already invoked */
1964         if (cmpxchg(&kmemleak_error, 0, 1))
1965                 return;
1966
1967         /* stop any memory operation tracing */
1968         kmemleak_enabled = 0;
1969         kmemleak_early_log = 0;
1970
1971         /* check whether it is too early for a kernel thread */
1972         if (kmemleak_initialized)
1973                 schedule_work(&cleanup_work);
1974         else
1975                 kmemleak_free_enabled = 0;
1976
1977         pr_info("Kernel memory leak detector disabled\n");
1978 }
1979
1980 /*
1981  * Allow boot-time kmemleak disabling (enabled by default).
1982  */
1983 static int __init kmemleak_boot_config(char *str)
1984 {
1985         if (!str)
1986                 return -EINVAL;
1987         if (strcmp(str, "off") == 0)
1988                 kmemleak_disable();
1989         else if (strcmp(str, "on") == 0)
1990                 kmemleak_skip_disable = 1;
1991         else
1992                 return -EINVAL;
1993         return 0;
1994 }
1995 early_param("kmemleak", kmemleak_boot_config);
1996
1997 static void __init print_log_trace(struct early_log *log)
1998 {
1999         pr_notice("Early log backtrace:\n");
2000         stack_trace_print(log->trace, log->trace_len, 2);
2001 }
2002
2003 /*
2004  * Kmemleak initialization.
2005  */
2006 void __init kmemleak_init(void)
2007 {
2008         int i;
2009         unsigned long flags;
2010
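        /*
         * Honour CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF unless "kmemleak=on" was
         * passed on the kernel command line (which sets kmemleak_skip_disable).
         */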
2011 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2012         if (!kmemleak_skip_disable) {
2013                 kmemleak_disable();
2014                 return;
2015         }
2016 #endif
2017
2018         jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2019         jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2020
2021         object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2022         scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2023
2024         if (crt_early_log > ARRAY_SIZE(early_log))
2025                 pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
2026                         crt_early_log);
2027
2028         /* the kernel is still in UP mode, so disabling the IRQs is enough */
2029         local_irq_save(flags);
2030         kmemleak_early_log = 0;
2031         if (kmemleak_error) {
2032                 local_irq_restore(flags);
2033                 return;
2034         } else {
2035                 kmemleak_enabled = 1;
2036                 kmemleak_free_enabled = 1;
2037         }
2038         local_irq_restore(flags);
2039
2040         /* register the data/bss sections */
2041         create_object((unsigned long)_sdata, _edata - _sdata,
2042                       KMEMLEAK_GREY, GFP_ATOMIC);
2043         create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2044                       KMEMLEAK_GREY, GFP_ATOMIC);
2045         /* only register .data..ro_after_init if not within .data */
2046         if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
2047                 create_object((unsigned long)__start_ro_after_init,
2048                               __end_ro_after_init - __start_ro_after_init,
2049                               KMEMLEAK_GREY, GFP_ATOMIC);
2050
2051         /*
2052          * This is the point where tracking allocations is safe. Automatic
2053          * scanning is started during the late initcall. Add the early logged
2054          * callbacks to the kmemleak infrastructure.
2055          */
2056         for (i = 0; i < crt_early_log; i++) {
2057                 struct early_log *log = &early_log[i];
2058
2059                 switch (log->op_type) {
2060                 case KMEMLEAK_ALLOC:
2061                         early_alloc(log);
2062                         break;
2063                 case KMEMLEAK_ALLOC_PERCPU:
2064                         early_alloc_percpu(log);
2065                         break;
2066                 case KMEMLEAK_FREE:
2067                         kmemleak_free(log->ptr);
2068                         break;
2069                 case KMEMLEAK_FREE_PART:
2070                         kmemleak_free_part(log->ptr, log->size);
2071                         break;
2072                 case KMEMLEAK_FREE_PERCPU:
2073                         kmemleak_free_percpu(log->ptr);
2074                         break;
2075                 case KMEMLEAK_NOT_LEAK:
2076                         kmemleak_not_leak(log->ptr);
2077                         break;
2078                 case KMEMLEAK_IGNORE:
2079                         kmemleak_ignore(log->ptr);
2080                         break;
2081                 case KMEMLEAK_SCAN_AREA:
2082                         kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
2083                         break;
2084                 case KMEMLEAK_NO_SCAN:
2085                         kmemleak_no_scan(log->ptr);
2086                         break;
2087                 case KMEMLEAK_SET_EXCESS_REF:
2088                         object_set_excess_ref((unsigned long)log->ptr,
2089                                               log->excess_ref);
2090                         break;
2091                 default:
2092                         kmemleak_warn("Unknown early log operation: %d\n",
2093                                       log->op_type);
2094                 }
2095
2096                 if (kmemleak_warning) {
2097                         print_log_trace(log);
2098                         kmemleak_warning = 0;
2099                 }
2100         }
2101 }
2102
2103 /*
2104  * Late initialization function.
2105  */
2106 static int __init kmemleak_late_init(void)
2107 {
2108         kmemleak_initialized = 1;
2109
2110         debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2111
2112         if (kmemleak_error) {
2113                 /*
2114                  * Some error occurred and kmemleak was disabled. There is a
2115                  * small chance that kmemleak_disable() was called immediately
2116                  * after setting kmemleak_initialized and we may end up with
2117                  * two clean-up threads but serialized by scan_mutex.
2118                  */
2119                 schedule_work(&cleanup_work);
2120                 return -ENOMEM;
2121         }
2122
2123         if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2124                 mutex_lock(&scan_mutex);
2125                 start_scan_thread();
2126                 mutex_unlock(&scan_mutex);
2127         }
2128
2129         pr_info("Kernel memory leak detector initialized\n");
2130
2131         return 0;
2132 }
2133 late_initcall(kmemleak_late_init);