1 /*
2  *  Kernel Probes (KProbes)
3  *  kernel/kprobes.c
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  * Copyright (C) IBM Corporation, 2002, 2004
20  *
21  * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22  *              Probes initial implementation (includes suggestions from
23  *              Rusty Russell).
24  * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25  *              hlists and exceptions notifier as suggested by Andi Kleen.
26  * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27  *              interface to access function arguments.
28  * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29  *              exceptions notifier to be first on the priority list.
30  * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31  *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32  *              <prasanna@in.ibm.com> added function-return probes.
33  */
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
51
52 #include <asm/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <linux/uaccess.h>
56
57 #define KPROBE_HASH_BITS 6
58 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
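/* i.e. (1 << 6) = 64 hash buckets, used to size both kprobe_table and
 * kretprobe_inst_table below. */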
59
60
61 static int kprobes_initialized;
62 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
63 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
64
65 /* NOTE: change this value only with kprobe_mutex held */
66 static bool kprobes_all_disarmed;
67
68 /* This protects kprobe_table and optimizing_list */
69 static DEFINE_MUTEX(kprobe_mutex);
70 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
71 static struct {
72         raw_spinlock_t lock ____cacheline_aligned_in_smp;
73 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
74
75 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
76                                         unsigned int __unused)
77 {
78         return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
79 }
80
81 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
82 {
83         return &(kretprobe_table_locks[hash].lock);
84 }
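
/*
 * Illustrative sketch (not part of the original source): both hash tables
 * above are indexed with hash_ptr(). For example, a probed task pointer
 * maps to a bucket and its per-bucket lock roughly like this:
 *
 *	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
 *	struct hlist_head *head = &kretprobe_inst_table[hash];
 *	raw_spinlock_t *lock = kretprobe_table_lock_ptr(hash);
 */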
85
86 /* List of symbols that can be overridden for error injection. */
87 static LIST_HEAD(kprobe_error_injection_list);
88 static DEFINE_MUTEX(kprobe_ei_mutex);
89 struct kprobe_ei_entry {
90         struct list_head list;
91         unsigned long start_addr;
92         unsigned long end_addr;
93         void *priv;
94 };
95
96 /* Blacklist -- list of struct kprobe_blacklist_entry */
97 static LIST_HEAD(kprobe_blacklist);
98
99 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
100 /*
101  * kprobe->ainsn.insn points to the copy of the instruction to be
102  * single-stepped. x86_64, POWER4 and above have no-exec support and
103  * stepping on the instruction on a vmalloced/kmalloced/data page
104  * is a recipe for disaster
105  */
106 struct kprobe_insn_page {
107         struct list_head list;
108         kprobe_opcode_t *insns;         /* Page of instruction slots */
109         struct kprobe_insn_cache *cache;
110         int nused;
111         int ngarbage;
112         char slot_used[];
113 };
114
115 #define KPROBE_INSN_PAGE_SIZE(slots)                    \
116         (offsetof(struct kprobe_insn_page, slot_used) + \
117          (sizeof(char) * (slots)))
118
119 static int slots_per_page(struct kprobe_insn_cache *c)
120 {
121         return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
122 }
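
/*
 * Worked example (illustrative assumption, not from this file): on x86 a
 * kprobe_opcode_t is one byte and MAX_INSN_SIZE is 16, so with 4096-byte
 * pages slots_per_page() gives 4096 / (16 * 1) = 256 instruction slots
 * per page for the kprobe_insn_slots cache defined below.
 */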
123
124 enum kprobe_slot_state {
125         SLOT_CLEAN = 0,
126         SLOT_DIRTY = 1,
127         SLOT_USED = 2,
128 };
129
130 void __weak *alloc_insn_page(void)
131 {
132         return module_alloc(PAGE_SIZE);
133 }
134
135 void __weak free_insn_page(void *page)
136 {
137         module_memfree(page);
138 }
139
140 struct kprobe_insn_cache kprobe_insn_slots = {
141         .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
142         .alloc = alloc_insn_page,
143         .free = free_insn_page,
144         .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
145         .insn_size = MAX_INSN_SIZE,
146         .nr_garbage = 0,
147 };
148 static int collect_garbage_slots(struct kprobe_insn_cache *c);
149
150 /**
151  * __get_insn_slot() - Find a slot on an executable page for an instruction.
152  * We allocate an executable page if there's no room on existing ones.
153  */
154 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
155 {
156         struct kprobe_insn_page *kip;
157         kprobe_opcode_t *slot = NULL;
158
159         /* Since the slot array is not protected by rcu, we need a mutex */
160         mutex_lock(&c->mutex);
161  retry:
162         rcu_read_lock();
163         list_for_each_entry_rcu(kip, &c->pages, list) {
164                 if (kip->nused < slots_per_page(c)) {
165                         int i;
166                         for (i = 0; i < slots_per_page(c); i++) {
167                                 if (kip->slot_used[i] == SLOT_CLEAN) {
168                                         kip->slot_used[i] = SLOT_USED;
169                                         kip->nused++;
170                                         slot = kip->insns + (i * c->insn_size);
171                                         rcu_read_unlock();
172                                         goto out;
173                                 }
174                         }
175                         /* kip->nused is broken. Fix it. */
176                         kip->nused = slots_per_page(c);
177                         WARN_ON(1);
178                 }
179         }
180         rcu_read_unlock();
181
182         /* If there are any garbage slots, collect them and try again. */
183         if (c->nr_garbage && collect_garbage_slots(c) == 0)
184                 goto retry;
185
186         /* All out of space.  Need to allocate a new page. */
187         kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
188         if (!kip)
189                 goto out;
190
191         /*
192          * Use module_alloc so this page is within +/- 2GB of where the
193          * kernel image and loaded module images reside. This is required
194          * so x86_64 can correctly handle the %rip-relative fixups.
195          */
196         kip->insns = c->alloc();
197         if (!kip->insns) {
198                 kfree(kip);
199                 goto out;
200         }
201         INIT_LIST_HEAD(&kip->list);
202         memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
203         kip->slot_used[0] = SLOT_USED;
204         kip->nused = 1;
205         kip->ngarbage = 0;
206         kip->cache = c;
207         list_add_rcu(&kip->list, &c->pages);
208         slot = kip->insns;
209 out:
210         mutex_unlock(&c->mutex);
211         return slot;
212 }
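
/*
 * Usage sketch (hedged): arch code normally reaches this through the
 * get_insn_slot()/free_insn_slot() wrappers generated in <linux/kprobes.h>,
 * which bind the kprobe_insn_slots cache above:
 *
 *	kprobe_opcode_t *slot = get_insn_slot();
 *	if (!slot)
 *		return -ENOMEM;
 *	// ... copy the probed instruction into the slot ...
 *	free_insn_slot(slot, 0);	// 0 == clean, reclaimed immediately
 */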
213
214 /* Return 1 if this slot's page is emptied (all garbage collected), otherwise 0. */
215 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
216 {
217         kip->slot_used[idx] = SLOT_CLEAN;
218         kip->nused--;
219         if (kip->nused == 0) {
220                 /*
221                  * Page is no longer in use.  Free it unless
222                  * it's the last one.  We keep the last one
223                  * so as not to have to set it up again the
224                  * next time somebody inserts a probe.
225                  */
226                 if (!list_is_singular(&kip->list)) {
227                         list_del_rcu(&kip->list);
228                         synchronize_rcu();
229                         kip->cache->free(kip->insns);
230                         kfree(kip);
231                 }
232                 return 1;
233         }
234         return 0;
235 }
236
237 static int collect_garbage_slots(struct kprobe_insn_cache *c)
238 {
239         struct kprobe_insn_page *kip, *next;
240
241         /* Ensure no one is still running on the garbage slots */
242         synchronize_sched();
243
244         list_for_each_entry_safe(kip, next, &c->pages, list) {
245                 int i;
246                 if (kip->ngarbage == 0)
247                         continue;
248                 kip->ngarbage = 0;      /* we will collect all garbage slots */
249                 for (i = 0; i < slots_per_page(c); i++) {
250                         if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
251                                 break;
252                 }
253         }
254         c->nr_garbage = 0;
255         return 0;
256 }
257
258 void __free_insn_slot(struct kprobe_insn_cache *c,
259                       kprobe_opcode_t *slot, int dirty)
260 {
261         struct kprobe_insn_page *kip;
262         long idx;
263
264         mutex_lock(&c->mutex);
265         rcu_read_lock();
266         list_for_each_entry_rcu(kip, &c->pages, list) {
267                 idx = ((long)slot - (long)kip->insns) /
268                         (c->insn_size * sizeof(kprobe_opcode_t));
269                 if (idx >= 0 && idx < slots_per_page(c))
270                         goto out;
271         }
272         /* Could not find this slot. */
273         WARN_ON(1);
274         kip = NULL;
275 out:
276         rcu_read_unlock();
277         /* Mark and sweep: this may sleep */
278         if (kip) {
279                 /* Check double free */
280                 WARN_ON(kip->slot_used[idx] != SLOT_USED);
281                 if (dirty) {
282                         kip->slot_used[idx] = SLOT_DIRTY;
283                         kip->ngarbage++;
284                         if (++c->nr_garbage > slots_per_page(c))
285                                 collect_garbage_slots(c);
286                 } else {
287                         collect_one_slot(kip, idx);
288                 }
289         }
290         mutex_unlock(&c->mutex);
291 }
292
293 /*
294  * Check whether the given address is on a page of kprobe instruction slots.
295  * This is used to check whether an address found on a stack is in a text
296  * area or not.
297  */
298 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
299 {
300         struct kprobe_insn_page *kip;
301         bool ret = false;
302
303         rcu_read_lock();
304         list_for_each_entry_rcu(kip, &c->pages, list) {
305                 if (addr >= (unsigned long)kip->insns &&
306                     addr < (unsigned long)kip->insns + PAGE_SIZE) {
307                         ret = true;
308                         break;
309                 }
310         }
311         rcu_read_unlock();
312
313         return ret;
314 }
315
316 #ifdef CONFIG_OPTPROBES
317 /* For optimized_kprobe buffer */
318 struct kprobe_insn_cache kprobe_optinsn_slots = {
319         .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
320         .alloc = alloc_insn_page,
321         .free = free_insn_page,
322         .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
323         /* .insn_size is initialized later */
324         .nr_garbage = 0,
325 };
326 #endif
327 #endif
328
329 /* We have preemption disabled, so it is safe to use the __ versions */
330 static inline void set_kprobe_instance(struct kprobe *kp)
331 {
332         __this_cpu_write(kprobe_instance, kp);
333 }
334
335 static inline void reset_kprobe_instance(void)
336 {
337         __this_cpu_write(kprobe_instance, NULL);
338 }
339
340 /*
341  * This routine is called either:
342  *      - under the kprobe_mutex - during kprobe_[un]register()
343  *                              OR
344  *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
345  */
346 struct kprobe *get_kprobe(void *addr)
347 {
348         struct hlist_head *head;
349         struct kprobe *p;
350
351         head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
352         hlist_for_each_entry_rcu(p, head, hlist) {
353                 if (p->addr == addr)
354                         return p;
355         }
356
357         return NULL;
358 }
359 NOKPROBE_SYMBOL(get_kprobe);
360
361 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
362
363 /* Return true if the kprobe is an aggregator */
364 static inline int kprobe_aggrprobe(struct kprobe *p)
365 {
366         return p->pre_handler == aggr_pre_handler;
367 }
368
369 /* Return true(!0) if the kprobe is unused */
370 static inline int kprobe_unused(struct kprobe *p)
371 {
372         return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
373                list_empty(&p->list);
374 }
375
376 /*
377  * Keep all fields in the kprobe consistent
378  */
379 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
380 {
381         memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
382         memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
383 }
384
385 #ifdef CONFIG_OPTPROBES
386 /* NOTE: change this value only with kprobe_mutex held */
387 static bool kprobes_allow_optimization;
388
389 /*
390  * Call all of the pre_handlers on the list, but ignore their return values.
391  * This must be called from the arch-dependent optimized caller.
392  */
393 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
394 {
395         struct kprobe *kp;
396
397         list_for_each_entry_rcu(kp, &p->list, list) {
398                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
399                         set_kprobe_instance(kp);
400                         kp->pre_handler(kp, regs);
401                 }
402                 reset_kprobe_instance();
403         }
404 }
405 NOKPROBE_SYMBOL(opt_pre_handler);
406
407 /* Free optimized instructions and optimized_kprobe */
408 static void free_aggr_kprobe(struct kprobe *p)
409 {
410         struct optimized_kprobe *op;
411
412         op = container_of(p, struct optimized_kprobe, kp);
413         arch_remove_optimized_kprobe(op);
414         arch_remove_kprobe(p);
415         kfree(op);
416 }
417
418 /* Return true(!0) if the kprobe is ready for optimization. */
419 static inline int kprobe_optready(struct kprobe *p)
420 {
421         struct optimized_kprobe *op;
422
423         if (kprobe_aggrprobe(p)) {
424                 op = container_of(p, struct optimized_kprobe, kp);
425                 return arch_prepared_optinsn(&op->optinsn);
426         }
427
428         return 0;
429 }
430
431 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
432 static inline int kprobe_disarmed(struct kprobe *p)
433 {
434         struct optimized_kprobe *op;
435
436         /* If it is not an aggr/opt probe, just return whether it is disabled */
437         if (!kprobe_aggrprobe(p))
438                 return kprobe_disabled(p);
439
440         op = container_of(p, struct optimized_kprobe, kp);
441
442         return kprobe_disabled(p) && list_empty(&op->list);
443 }
444
445 /* Return true(!0) if the probe is queued on (un)optimizing lists */
446 static int kprobe_queued(struct kprobe *p)
447 {
448         struct optimized_kprobe *op;
449
450         if (kprobe_aggrprobe(p)) {
451                 op = container_of(p, struct optimized_kprobe, kp);
452                 if (!list_empty(&op->list))
453                         return 1;
454         }
455         return 0;
456 }
457
458 /*
459  * Return an optimized kprobe whose optimizing code replaces
460  * instructions including addr (excluding the breakpoint instruction itself).
461  */
462 static struct kprobe *get_optimized_kprobe(unsigned long addr)
463 {
464         int i;
465         struct kprobe *p = NULL;
466         struct optimized_kprobe *op;
467
468         /* Don't check i == 0, since that is a breakpoint case. */
469         for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
470                 p = get_kprobe((void *)(addr - i));
471
472         if (p && kprobe_optready(p)) {
473                 op = container_of(p, struct optimized_kprobe, kp);
474                 if (arch_within_optimized_kprobe(op, addr))
475                         return p;
476         }
477
478         return NULL;
479 }
480
481 /* Optimization staging list, protected by kprobe_mutex */
482 static LIST_HEAD(optimizing_list);
483 static LIST_HEAD(unoptimizing_list);
484 static LIST_HEAD(freeing_list);
485
486 static void kprobe_optimizer(struct work_struct *work);
487 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
488 #define OPTIMIZE_DELAY 5
489
490 /*
491  * Optimize (replace a breakpoint with a jump) kprobes listed on
492  * optimizing_list.
493  */
494 static void do_optimize_kprobes(void)
495 {
496         /*
497          * The optimization/unoptimization refers to online_cpus via
498          * stop_machine(), while cpu-hotplug modifies online_cpus. At the
499          * same time, text_mutex is held both here and during cpu-hotplug.
500          * This combination can cause a deadlock (cpu-hotplug tries to lock
501          * text_mutex, but stop_machine() cannot proceed because online_cpus
502          * has been changed).
503          * To avoid this deadlock, the caller must hold the cpu-hotplug lock,
504          * preventing cpu-hotplug from running outside of text_mutex locking.
505          */
506         lockdep_assert_cpus_held();
507
508         /* Optimization is never done while all probes are disarmed */
509         if (kprobes_all_disarmed || !kprobes_allow_optimization ||
510             list_empty(&optimizing_list))
511                 return;
512
513         mutex_lock(&text_mutex);
514         arch_optimize_kprobes(&optimizing_list);
515         mutex_unlock(&text_mutex);
516 }
517
518 /*
519  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
520  * if need) kprobes listed on unoptimizing_list.
521  */
522 static void do_unoptimize_kprobes(void)
523 {
524         struct optimized_kprobe *op, *tmp;
525
526         /* See comment in do_optimize_kprobes() */
527         lockdep_assert_cpus_held();
528
529         /* Unoptimization must be done at any time, even while disarmed */
530         if (list_empty(&unoptimizing_list))
531                 return;
532
533         mutex_lock(&text_mutex);
534         arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
535         /* Loop over freeing_list for disarming */
536         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
537                 /* Disarm probes if marked disabled */
538                 if (kprobe_disabled(&op->kp))
539                         arch_disarm_kprobe(&op->kp);
540                 if (kprobe_unused(&op->kp)) {
541                         /*
542                          * Remove unused probes from hash list. After waiting
543                          * for synchronization, these probes are reclaimed.
544                          * (reclaiming is done by do_free_cleaned_kprobes.)
545                          */
546                         hlist_del_rcu(&op->kp.hlist);
547                 } else
548                         list_del_init(&op->list);
549         }
550         mutex_unlock(&text_mutex);
551 }
552
553 /* Reclaim all kprobes on the freeing_list */
554 static void do_free_cleaned_kprobes(void)
555 {
556         struct optimized_kprobe *op, *tmp;
557
558         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
559                 BUG_ON(!kprobe_unused(&op->kp));
560                 list_del_init(&op->list);
561                 free_aggr_kprobe(&op->kp);
562         }
563 }
564
565 /* Start optimizer after OPTIMIZE_DELAY passed */
566 static void kick_kprobe_optimizer(void)
567 {
568         schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
569 }
570
571 /* Kprobe jump optimizer */
572 static void kprobe_optimizer(struct work_struct *work)
573 {
574         mutex_lock(&kprobe_mutex);
575         cpus_read_lock();
576         /* Lock modules while optimizing kprobes */
577         mutex_lock(&module_mutex);
578
579         /*
580          * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
581          * kprobes before waiting for quiesence period.
582          * kprobes before waiting for the quiescence period.
583         do_unoptimize_kprobes();
584
585         /*
586          * Step 2: Wait for the quiescence period to ensure that all potentially
587          * preempted tasks have scheduled normally. Because an optprobe may
588          * modify multiple instructions, there is a chance that a task was
589          * preempted on the Nth instruction; such a task could resume in the
590          * middle (2nd-Nth byte) of the jump instruction. This wait avoids that.
591          * Note that on a non-preemptive kernel, this is transparently converted
592          * to synchronize_sched() to wait for all interrupts to have completed.
593          */
594         synchronize_rcu_tasks();
595
596         /* Step 3: Optimize kprobes after the quiescence period */
597         do_optimize_kprobes();
598
599         /* Step 4: Free cleaned kprobes after the quiescence period */
600         do_free_cleaned_kprobes();
601
602         mutex_unlock(&module_mutex);
603         cpus_read_unlock();
604         mutex_unlock(&kprobe_mutex);
605
606         /* Step 5: Kick optimizer again if needed */
607         if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
608                 kick_kprobe_optimizer();
609 }
610
611 /* Wait for optimization and unoptimization to complete */
612 void wait_for_kprobe_optimizer(void)
613 {
614         mutex_lock(&kprobe_mutex);
615
616         while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
617                 mutex_unlock(&kprobe_mutex);
618
619                 /* this will also make optimizing_work execute immediately */
620                 flush_delayed_work(&optimizing_work);
621                 /* @optimizing_work might not have been queued yet, relax */
622                 cpu_relax();
623
624                 mutex_lock(&kprobe_mutex);
625         }
626
627         mutex_unlock(&kprobe_mutex);
628 }
629
630 /* Optimize kprobe if p is ready to be optimized */
631 static void optimize_kprobe(struct kprobe *p)
632 {
633         struct optimized_kprobe *op;
634
635         /* Check if the kprobe is disabled or not ready for optimization. */
636         if (!kprobe_optready(p) || !kprobes_allow_optimization ||
637             (kprobe_disabled(p) || kprobes_all_disarmed))
638                 return;
639
640         /* Neither break_handler nor post_handler is supported. */
641         if (p->break_handler || p->post_handler)
642                 return;
643
644         op = container_of(p, struct optimized_kprobe, kp);
645
646         /* Check that there are no other kprobes at the optimized instructions */
647         if (arch_check_optimized_kprobe(op) < 0)
648                 return;
649
650         /* Check if it is already optimized. */
651         if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
652                 return;
653         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
654
655         if (!list_empty(&op->list))
656                 /* This probe is queued for unoptimizing; just dequeue it */
657                 list_del_init(&op->list);
658         else {
659                 list_add(&op->list, &optimizing_list);
660                 kick_kprobe_optimizer();
661         }
662 }
663
664 /* Shortcut for direct unoptimizing */
665 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
666 {
667         lockdep_assert_cpus_held();
668         arch_unoptimize_kprobe(op);
669         if (kprobe_disabled(&op->kp))
670                 arch_disarm_kprobe(&op->kp);
671 }
672
673 /* Unoptimize a kprobe if p is optimized */
674 static void unoptimize_kprobe(struct kprobe *p, bool force)
675 {
676         struct optimized_kprobe *op;
677
678         if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
679                 return; /* This is neither an optprobe nor optimized */
680
681         op = container_of(p, struct optimized_kprobe, kp);
682         if (!kprobe_optimized(p)) {
683                 /* Unoptimized or unoptimizing case */
684                 if (force && !list_empty(&op->list)) {
685                         /*
686                          * Only if this kprobe is being unoptimized and force is
687                          * set, forcibly unoptimize it. (There is no need to
688                          * unoptimize an already unoptimized kprobe again :)
689                          */
690                         list_del_init(&op->list);
691                         force_unoptimize_kprobe(op);
692                 }
693                 return;
694         }
695
696         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
697         if (!list_empty(&op->list)) {
698                 /* Dequeue from the optimization queue */
699                 list_del_init(&op->list);
700                 return;
701         }
702         /* Optimized kprobe case */
703         if (force)
704                 /* Forcibly update the code: this is a special case */
705                 force_unoptimize_kprobe(op);
706         else {
707                 list_add(&op->list, &unoptimizing_list);
708                 kick_kprobe_optimizer();
709         }
710 }
711
712 /* Cancel unoptimizing so the probe can be reused */
713 static void reuse_unused_kprobe(struct kprobe *ap)
714 {
715         struct optimized_kprobe *op;
716
717         BUG_ON(!kprobe_unused(ap));
718         /*
719          * An unused kprobe MUST be in the middle of delayed unoptimizing (which
720          * means the relative jump is still installed) and disabled.
721          */
722         op = container_of(ap, struct optimized_kprobe, kp);
723         if (unlikely(list_empty(&op->list)))
724                 printk(KERN_WARNING "Warning: found a stray unused aggrprobe@%p\n",
725                        ap->addr);
726         /* Enable the probe again */
727         ap->flags &= ~KPROBE_FLAG_DISABLED;
728         /* Optimize it again (remove from op->list) */
729         BUG_ON(!kprobe_optready(ap));
730         optimize_kprobe(ap);
731 }
732
733 /* Remove optimized instructions */
734 static void kill_optimized_kprobe(struct kprobe *p)
735 {
736         struct optimized_kprobe *op;
737
738         op = container_of(p, struct optimized_kprobe, kp);
739         if (!list_empty(&op->list))
740                 /* Dequeue from the (un)optimization queue */
741                 list_del_init(&op->list);
742         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
743
744         if (kprobe_unused(p)) {
745                 /* Enqueue if it is unused */
746                 list_add(&op->list, &freeing_list);
747                 /*
748                  * Remove unused probes from the hash list. After waiting
749                  * for synchronization, this probe is reclaimed.
750                  * (reclaiming is done by do_free_cleaned_kprobes().)
751                  */
752                 hlist_del_rcu(&op->kp.hlist);
753         }
754
755         /* Don't touch the code, because it is already freed. */
756         arch_remove_optimized_kprobe(op);
757 }
758
759 static inline
760 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
761 {
762         if (!kprobe_ftrace(p))
763                 arch_prepare_optimized_kprobe(op, p);
764 }
765
766 /* Try to prepare optimized instructions */
767 static void prepare_optimized_kprobe(struct kprobe *p)
768 {
769         struct optimized_kprobe *op;
770
771         op = container_of(p, struct optimized_kprobe, kp);
772         __prepare_optimized_kprobe(op, p);
773 }
774
775 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
776 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
777 {
778         struct optimized_kprobe *op;
779
780         op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
781         if (!op)
782                 return NULL;
783
784         INIT_LIST_HEAD(&op->list);
785         op->kp.addr = p->addr;
786         __prepare_optimized_kprobe(op, p);
787
788         return &op->kp;
789 }
790
791 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
792
793 /*
794  * Prepare an optimized_kprobe and optimize it
795  * NOTE: p must be a normal registered kprobe
796  */
797 static void try_to_optimize_kprobe(struct kprobe *p)
798 {
799         struct kprobe *ap;
800         struct optimized_kprobe *op;
801
802         /* Impossible to optimize ftrace-based kprobe */
803         if (kprobe_ftrace(p))
804                 return;
805
806         /* For preparing optimization, jump_label_text_reserved() is called */
807         cpus_read_lock();
808         jump_label_lock();
809         mutex_lock(&text_mutex);
810
811         ap = alloc_aggr_kprobe(p);
812         if (!ap)
813                 goto out;
814
815         op = container_of(ap, struct optimized_kprobe, kp);
816         if (!arch_prepared_optinsn(&op->optinsn)) {
817                 /* If we failed to set up optimizing, fall back to a plain kprobe */
818                 arch_remove_optimized_kprobe(op);
819                 kfree(op);
820                 goto out;
821         }
822
823         init_aggr_kprobe(ap, p);
824         optimize_kprobe(ap);    /* This just kicks optimizer thread */
825
826 out:
827         mutex_unlock(&text_mutex);
828         jump_label_unlock();
829         cpus_read_unlock();
830 }
831
832 #ifdef CONFIG_SYSCTL
833 static void optimize_all_kprobes(void)
834 {
835         struct hlist_head *head;
836         struct kprobe *p;
837         unsigned int i;
838
839         mutex_lock(&kprobe_mutex);
840         /* If optimization is already allowed, just return */
841         if (kprobes_allow_optimization)
842                 goto out;
843
844         cpus_read_lock();
845         kprobes_allow_optimization = true;
846         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
847                 head = &kprobe_table[i];
848                 hlist_for_each_entry_rcu(p, head, hlist)
849                         if (!kprobe_disabled(p))
850                                 optimize_kprobe(p);
851         }
852         cpus_read_unlock();
853         printk(KERN_INFO "Kprobes globally optimized\n");
854 out:
855         mutex_unlock(&kprobe_mutex);
856 }
857
858 static void unoptimize_all_kprobes(void)
859 {
860         struct hlist_head *head;
861         struct kprobe *p;
862         unsigned int i;
863
864         mutex_lock(&kprobe_mutex);
865         /* If optimization is already prohibited, just return */
866         if (!kprobes_allow_optimization) {
867                 mutex_unlock(&kprobe_mutex);
868                 return;
869         }
870
871         cpus_read_lock();
872         kprobes_allow_optimization = false;
873         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
874                 head = &kprobe_table[i];
875                 hlist_for_each_entry_rcu(p, head, hlist) {
876                         if (!kprobe_disabled(p))
877                                 unoptimize_kprobe(p, false);
878                 }
879         }
880         cpus_read_unlock();
881         mutex_unlock(&kprobe_mutex);
882
883         /* Wait for unoptimizing completion */
884         wait_for_kprobe_optimizer();
885         printk(KERN_INFO "Kprobes globally unoptimized\n");
886 }
887
888 static DEFINE_MUTEX(kprobe_sysctl_mutex);
889 int sysctl_kprobes_optimization;
890 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
891                                       void __user *buffer, size_t *length,
892                                       loff_t *ppos)
893 {
894         int ret;
895
896         mutex_lock(&kprobe_sysctl_mutex);
897         sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
898         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
899
900         if (sysctl_kprobes_optimization)
901                 optimize_all_kprobes();
902         else
903                 unoptimize_all_kprobes();
904         mutex_unlock(&kprobe_sysctl_mutex);
905
906         return ret;
907 }
908 #endif /* CONFIG_SYSCTL */
909
910 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
911 static void __arm_kprobe(struct kprobe *p)
912 {
913         struct kprobe *_p;
914
915         /* Check collision with other optimized kprobes */
916         _p = get_optimized_kprobe((unsigned long)p->addr);
917         if (unlikely(_p))
918                 /* Fallback to unoptimized kprobe */
919                 unoptimize_kprobe(_p, true);
920
921         arch_arm_kprobe(p);
922         optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
923 }
924
925 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
926 static void __disarm_kprobe(struct kprobe *p, bool reopt)
927 {
928         struct kprobe *_p;
929
930         /* Try to unoptimize */
931         unoptimize_kprobe(p, kprobes_all_disarmed);
932
933         if (!kprobe_queued(p)) {
934                 arch_disarm_kprobe(p);
935                 /* If another kprobe was blocked, optimize it. */
936                 _p = get_optimized_kprobe((unsigned long)p->addr);
937                 if (unlikely(_p) && reopt)
938                         optimize_kprobe(_p);
939         }
940         /* TODO: reoptimize other probes after unoptimizing this probe */
941 }
942
943 #else /* !CONFIG_OPTPROBES */
944
945 #define optimize_kprobe(p)                      do {} while (0)
946 #define unoptimize_kprobe(p, f)                 do {} while (0)
947 #define kill_optimized_kprobe(p)                do {} while (0)
948 #define prepare_optimized_kprobe(p)             do {} while (0)
949 #define try_to_optimize_kprobe(p)               do {} while (0)
950 #define __arm_kprobe(p)                         arch_arm_kprobe(p)
951 #define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
952 #define kprobe_disarmed(p)                      kprobe_disabled(p)
953 #define wait_for_kprobe_optimizer()             do {} while (0)
954
955 /* There should be no unused kprobes; they cannot be reused without optimization */
956 static void reuse_unused_kprobe(struct kprobe *ap)
957 {
958         printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
959         BUG_ON(kprobe_unused(ap));
960 }
961
962 static void free_aggr_kprobe(struct kprobe *p)
963 {
964         arch_remove_kprobe(p);
965         kfree(p);
966 }
967
968 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
969 {
970         return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
971 }
972 #endif /* CONFIG_OPTPROBES */
973
974 #ifdef CONFIG_KPROBES_ON_FTRACE
975 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
976         .func = kprobe_ftrace_handler,
977         .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
978 };
979 static int kprobe_ftrace_enabled;
980
981 /* Must ensure p->addr is really on ftrace */
982 static int prepare_kprobe(struct kprobe *p)
983 {
984         if (!kprobe_ftrace(p))
985                 return arch_prepare_kprobe(p);
986
987         return arch_prepare_kprobe_ftrace(p);
988 }
989
990 /* Caller must lock kprobe_mutex */
991 static void arm_kprobe_ftrace(struct kprobe *p)
992 {
993         int ret;
994
995         ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
996                                    (unsigned long)p->addr, 0, 0);
997         WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
998         kprobe_ftrace_enabled++;
999         if (kprobe_ftrace_enabled == 1) {
1000                 ret = register_ftrace_function(&kprobe_ftrace_ops);
1001                 WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
1002         }
1003 }
1004
1005 /* Caller must lock kprobe_mutex */
1006 static void disarm_kprobe_ftrace(struct kprobe *p)
1007 {
1008         int ret;
1009
1010         kprobe_ftrace_enabled--;
1011         if (kprobe_ftrace_enabled == 0) {
1012                 ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1013                 ret = unregister_ftrace_function(&kprobe_ftrace_ops);
1014                 WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
1014         }
1015         ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
1016                            (unsigned long)p->addr, 1, 0);
1017         WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
1018 }
1019 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1020 #define prepare_kprobe(p)       arch_prepare_kprobe(p)
1021 #define arm_kprobe_ftrace(p)    do {} while (0)
1022 #define disarm_kprobe_ftrace(p) do {} while (0)
1023 #endif
1024
1025 /* Arm a kprobe with text_mutex */
1026 static void arm_kprobe(struct kprobe *kp)
1027 {
1028         if (unlikely(kprobe_ftrace(kp))) {
1029                 arm_kprobe_ftrace(kp);
1030                 return;
1031         }
1032         cpus_read_lock();
1033         mutex_lock(&text_mutex);
1034         __arm_kprobe(kp);
1035         mutex_unlock(&text_mutex);
1036         cpus_read_unlock();
1037 }
1038
1039 /* Disarm a kprobe with text_mutex */
1040 static void disarm_kprobe(struct kprobe *kp, bool reopt)
1041 {
1042         if (unlikely(kprobe_ftrace(kp))) {
1043                 disarm_kprobe_ftrace(kp);
1044                 return;
1045         }
1046
1047         cpus_read_lock();
1048         mutex_lock(&text_mutex);
1049         __disarm_kprobe(kp, reopt);
1050         mutex_unlock(&text_mutex);
1051         cpus_read_unlock();
1052 }
1053
1054 /*
1055  * Aggregate handlers for multiple kprobes support - these handlers
1056  * take care of invoking the individual kprobe handlers on p->list
1057  */
1058 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1059 {
1060         struct kprobe *kp;
1061
1062         list_for_each_entry_rcu(kp, &p->list, list) {
1063                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1064                         set_kprobe_instance(kp);
1065                         if (kp->pre_handler(kp, regs))
1066                                 return 1;
1067                 }
1068                 reset_kprobe_instance();
1069         }
1070         return 0;
1071 }
1072 NOKPROBE_SYMBOL(aggr_pre_handler);
1073
1074 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1075                               unsigned long flags)
1076 {
1077         struct kprobe *kp;
1078
1079         list_for_each_entry_rcu(kp, &p->list, list) {
1080                 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1081                         set_kprobe_instance(kp);
1082                         kp->post_handler(kp, regs, flags);
1083                         reset_kprobe_instance();
1084                 }
1085         }
1086 }
1087 NOKPROBE_SYMBOL(aggr_post_handler);
1088
1089 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1090                               int trapnr)
1091 {
1092         struct kprobe *cur = __this_cpu_read(kprobe_instance);
1093
1094         /*
1095          * if we faulted "during" the execution of a user specified
1096          * probe handler, invoke just that probe's fault handler
1097          */
1098         if (cur && cur->fault_handler) {
1099                 if (cur->fault_handler(cur, regs, trapnr))
1100                         return 1;
1101         }
1102         return 0;
1103 }
1104 NOKPROBE_SYMBOL(aggr_fault_handler);
1105
1106 static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1107 {
1108         struct kprobe *cur = __this_cpu_read(kprobe_instance);
1109         int ret = 0;
1110
1111         if (cur && cur->break_handler) {
1112                 if (cur->break_handler(cur, regs))
1113                         ret = 1;
1114         }
1115         reset_kprobe_instance();
1116         return ret;
1117 }
1118 NOKPROBE_SYMBOL(aggr_break_handler);
1119
1120 /* Walks the list and increments nmissed count for multiprobe case */
1121 void kprobes_inc_nmissed_count(struct kprobe *p)
1122 {
1123         struct kprobe *kp;
1124         if (!kprobe_aggrprobe(p)) {
1125                 p->nmissed++;
1126         } else {
1127                 list_for_each_entry_rcu(kp, &p->list, list)
1128                         kp->nmissed++;
1129         }
1130         return;
1131 }
1132 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1133
1134 void recycle_rp_inst(struct kretprobe_instance *ri,
1135                      struct hlist_head *head)
1136 {
1137         struct kretprobe *rp = ri->rp;
1138
1139         /* remove the rp inst from the kretprobe_inst_table */
1140         hlist_del(&ri->hlist);
1141         INIT_HLIST_NODE(&ri->hlist);
1142         if (likely(rp)) {
1143                 raw_spin_lock(&rp->lock);
1144                 hlist_add_head(&ri->hlist, &rp->free_instances);
1145                 raw_spin_unlock(&rp->lock);
1146         } else
1147                 /* Unregistering */
1148                 hlist_add_head(&ri->hlist, head);
1149 }
1150 NOKPROBE_SYMBOL(recycle_rp_inst);
1151
1152 void kretprobe_hash_lock(struct task_struct *tsk,
1153                          struct hlist_head **head, unsigned long *flags)
1154 __acquires(hlist_lock)
1155 {
1156         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1157         raw_spinlock_t *hlist_lock;
1158
1159         *head = &kretprobe_inst_table[hash];
1160         hlist_lock = kretprobe_table_lock_ptr(hash);
1161         raw_spin_lock_irqsave(hlist_lock, *flags);
1162 }
1163 NOKPROBE_SYMBOL(kretprobe_hash_lock);
1164
1165 static void kretprobe_table_lock(unsigned long hash,
1166                                  unsigned long *flags)
1167 __acquires(hlist_lock)
1168 {
1169         raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1170         raw_spin_lock_irqsave(hlist_lock, *flags);
1171 }
1172 NOKPROBE_SYMBOL(kretprobe_table_lock);
1173
1174 void kretprobe_hash_unlock(struct task_struct *tsk,
1175                            unsigned long *flags)
1176 __releases(hlist_lock)
1177 {
1178         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1179         raw_spinlock_t *hlist_lock;
1180
1181         hlist_lock = kretprobe_table_lock_ptr(hash);
1182         raw_spin_unlock_irqrestore(hlist_lock, *flags);
1183 }
1184 NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1185
1186 static void kretprobe_table_unlock(unsigned long hash,
1187                                    unsigned long *flags)
1188 __releases(hlist_lock)
1189 {
1190         raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1191         raw_spin_unlock_irqrestore(hlist_lock, *flags);
1192 }
1193 NOKPROBE_SYMBOL(kretprobe_table_unlock);
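
/*
 * Sketch of the typical arch-side use of the hash/lock helpers above
 * (hedged; the real code lives in each arch's kretprobe trampoline handler):
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		// ... call ri->rp->handler(), restore the return address ...
 *		recycle_rp_inst(ri, &empty_rp);
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */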
1194
1195 /*
1196  * This function is called from finish_task_switch when task tk becomes dead,
1197  * so that we can recycle any function-return probe instances associated
1198  * with this task. These left over instances represent probed functions
1199  * that have been called but will never return.
1200  */
1201 void kprobe_flush_task(struct task_struct *tk)
1202 {
1203         struct kretprobe_instance *ri;
1204         struct hlist_head *head, empty_rp;
1205         struct hlist_node *tmp;
1206         unsigned long hash, flags = 0;
1207
1208         if (unlikely(!kprobes_initialized))
1209                 /* Early boot.  kretprobe_table_locks not yet initialized. */
1210                 return;
1211
1212         INIT_HLIST_HEAD(&empty_rp);
1213         hash = hash_ptr(tk, KPROBE_HASH_BITS);
1214         head = &kretprobe_inst_table[hash];
1215         kretprobe_table_lock(hash, &flags);
1216         hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1217                 if (ri->task == tk)
1218                         recycle_rp_inst(ri, &empty_rp);
1219         }
1220         kretprobe_table_unlock(hash, &flags);
1221         hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1222                 hlist_del(&ri->hlist);
1223                 kfree(ri);
1224         }
1225 }
1226 NOKPROBE_SYMBOL(kprobe_flush_task);
1227
1228 static inline void free_rp_inst(struct kretprobe *rp)
1229 {
1230         struct kretprobe_instance *ri;
1231         struct hlist_node *next;
1232
1233         hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1234                 hlist_del(&ri->hlist);
1235                 kfree(ri);
1236         }
1237 }
1238
1239 static void cleanup_rp_inst(struct kretprobe *rp)
1240 {
1241         unsigned long flags, hash;
1242         struct kretprobe_instance *ri;
1243         struct hlist_node *next;
1244         struct hlist_head *head;
1245
1246         /* No race here */
1247         for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1248                 kretprobe_table_lock(hash, &flags);
1249                 head = &kretprobe_inst_table[hash];
1250                 hlist_for_each_entry_safe(ri, next, head, hlist) {
1251                         if (ri->rp == rp)
1252                                 ri->rp = NULL;
1253                 }
1254                 kretprobe_table_unlock(hash, &flags);
1255         }
1256         free_rp_inst(rp);
1257 }
1258 NOKPROBE_SYMBOL(cleanup_rp_inst);
1259
1260 /*
1261 * Add the new probe to ap->list. Fail if this is the
1262 * second jprobe at the address - two jprobes can't coexist
1263 */
1264 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1265 {
1266         BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1267
1268         if (p->break_handler || p->post_handler)
1269                 unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1270
1271         if (p->break_handler) {
1272                 if (ap->break_handler)
1273                         return -EEXIST;
1274                 list_add_tail_rcu(&p->list, &ap->list);
1275                 ap->break_handler = aggr_break_handler;
1276         } else
1277                 list_add_rcu(&p->list, &ap->list);
1278         if (p->post_handler && !ap->post_handler)
1279                 ap->post_handler = aggr_post_handler;
1280
1281         return 0;
1282 }
1283
1284 /*
1285  * Fill in the required fields of the "manager kprobe". Replace the
1286  * earlier kprobe in the hlist with the manager kprobe
1287  */
1288 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1289 {
1290         /* Copy p's insn slot to ap */
1291         copy_kprobe(p, ap);
1292         flush_insn_slot(ap);
1293         ap->addr = p->addr;
1294         ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1295         ap->pre_handler = aggr_pre_handler;
1296         ap->fault_handler = aggr_fault_handler;
1297         /* We don't care about a kprobe which has gone. */
1298         if (p->post_handler && !kprobe_gone(p))
1299                 ap->post_handler = aggr_post_handler;
1300         if (p->break_handler && !kprobe_gone(p))
1301                 ap->break_handler = aggr_break_handler;
1302
1303         INIT_LIST_HEAD(&ap->list);
1304         INIT_HLIST_NODE(&ap->hlist);
1305
1306         list_add_rcu(&p->list, &ap->list);
1307         hlist_replace_rcu(&p->hlist, &ap->hlist);
1308 }
1309
1310 /*
1311  * This is the second or subsequent kprobe at the address - handle
1312  * the intricacies
1313  */
1314 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1315 {
1316         int ret = 0;
1317         struct kprobe *ap = orig_p;
1318
1319         cpus_read_lock();
1320
1321         /* For preparing optimization, jump_label_text_reserved() is called */
1322         jump_label_lock();
1323         mutex_lock(&text_mutex);
1324
1325         if (!kprobe_aggrprobe(orig_p)) {
1326                 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1327                 ap = alloc_aggr_kprobe(orig_p);
1328                 if (!ap) {
1329                         ret = -ENOMEM;
1330                         goto out;
1331                 }
1332                 init_aggr_kprobe(ap, orig_p);
1333         } else if (kprobe_unused(ap))
1334                 /* This probe is going to die. Rescue it */
1335                 reuse_unused_kprobe(ap);
1336
1337         if (kprobe_gone(ap)) {
1338                 /*
1339                  * We are attempting to insert a new probe at the same location
1340                  * as a probe that was in a module vaddr area which has already
1341                  * been freed. The instruction slot has therefore been released,
1342                  * and we need a new slot for the new probe.
1343                  */
1344                 ret = arch_prepare_kprobe(ap);
1345                 if (ret)
1346                         /*
1347                          * Even if we fail to allocate a new slot, there is no need
1348                          * to free the aggr_kprobe. It will be used next time, or
1349                          * freed by unregister_kprobe().
1350                          */
1351                         goto out;
1352
1353                 /* Prepare optimized instructions if possible. */
1354                 prepare_optimized_kprobe(ap);
1355
1356                 /*
1357                  * Clear gone flag to prevent allocating new slot again, and
1358                  * set disabled flag because it is not armed yet.
1359                  */
1360                 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1361                             | KPROBE_FLAG_DISABLED;
1362         }
1363
1364         /* Copy ap's insn slot to p */
1365         copy_kprobe(ap, p);
1366         ret = add_new_kprobe(ap, p);
1367
1368 out:
1369         mutex_unlock(&text_mutex);
1370         jump_label_unlock();
1371         cpus_read_unlock();
1372
1373         if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1374                 ap->flags &= ~KPROBE_FLAG_DISABLED;
1375                 if (!kprobes_all_disarmed)
1376                         /* Arm the breakpoint again. */
1377                         arm_kprobe(ap);
1378         }
1379         return ret;
1380 }
1381
1382 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1383 {
1384         /* The __kprobes marked functions and entry code must not be probed */
1385         return addr >= (unsigned long)__kprobes_text_start &&
1386                addr < (unsigned long)__kprobes_text_end;
1387 }
1388
1389 bool within_kprobe_blacklist(unsigned long addr)
1390 {
1391         struct kprobe_blacklist_entry *ent;
1392
1393         if (arch_within_kprobe_blacklist(addr))
1394                 return true;
1395         /*
1396          * If there exists a kprobe_blacklist, verify and
1397          * fail any probe registration in the prohibited area
1398          */
1399         list_for_each_entry(ent, &kprobe_blacklist, list) {
1400                 if (addr >= ent->start_addr && addr < ent->end_addr)
1401                         return true;
1402         }
1403
1404         return false;
1405 }
1406
1407 bool within_kprobe_error_injection_list(unsigned long addr)
1408 {
1409         struct kprobe_ei_entry *ent;
1410
1411         list_for_each_entry(ent, &kprobe_error_injection_list, list) {
1412                 if (addr >= ent->start_addr && addr < ent->end_addr)
1413                         return true;
1414         }
1415         return false;
1416 }
1417
1418 /*
1419  * If we have a symbol_name argument, look it up and add the offset field
1420  * to it. This way, we can specify a relative address to a symbol.
1421  * This returns an encoded error if it fails to look up the symbol or if the
1422  * combination of parameters is invalid.
1423  */
1424 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1425                         const char *symbol_name, unsigned int offset)
1426 {
1427         if ((symbol_name && addr) || (!symbol_name && !addr))
1428                 goto invalid;
1429
1430         if (symbol_name) {
1431                 addr = kprobe_lookup_name(symbol_name, offset);
1432                 if (!addr)
1433                         return ERR_PTR(-ENOENT);
1434         }
1435
1436         addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1437         if (addr)
1438                 return addr;
1439
1440 invalid:
1441         return ERR_PTR(-EINVAL);
1442 }
1443
1444 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1445 {
1446         return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1447 }
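
/*
 * Example (illustrative only): a caller fills in either ->addr or
 * ->symbol_name (optionally with ->offset) before registering, e.g.
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",	// placeholder target symbol
 *		.offset		= 0,
 *	};
 *
 * kprobe_addr() then resolves this via kprobe_lookup_name(); passing both
 * or neither of ->addr and ->symbol_name yields ERR_PTR(-EINVAL).
 */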
1448
1449 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1450 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1451 {
1452         struct kprobe *ap, *list_p;
1453
1454         ap = get_kprobe(p->addr);
1455         if (unlikely(!ap))
1456                 return NULL;
1457
1458         if (p != ap) {
1459                 list_for_each_entry_rcu(list_p, &ap->list, list)
1460                         if (list_p == p)
1461                         /* kprobe p is a valid probe */
1462                                 goto valid;
1463                 return NULL;
1464         }
1465 valid:
1466         return ap;
1467 }
1468
1469 /* Return error if the kprobe is being re-registered */
1470 static inline int check_kprobe_rereg(struct kprobe *p)
1471 {
1472         int ret = 0;
1473
1474         mutex_lock(&kprobe_mutex);
1475         if (__get_valid_kprobe(p))
1476                 ret = -EINVAL;
1477         mutex_unlock(&kprobe_mutex);
1478
1479         return ret;
1480 }
1481
1482 int __weak arch_check_ftrace_location(struct kprobe *p)
1483 {
1484         unsigned long ftrace_addr;
1485
1486         ftrace_addr = ftrace_location((unsigned long)p->addr);
1487         if (ftrace_addr) {
1488 #ifdef CONFIG_KPROBES_ON_FTRACE
1489                 /* Given address is not on the instruction boundary */
1490                 if ((unsigned long)p->addr != ftrace_addr)
1491                         return -EILSEQ;
1492                 p->flags |= KPROBE_FLAG_FTRACE;
1493 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1494                 return -EINVAL;
1495 #endif
1496         }
1497         return 0;
1498 }
1499
1500 static int check_kprobe_address_safe(struct kprobe *p,
1501                                      struct module **probed_mod)
1502 {
1503         int ret;
1504
1505         ret = arch_check_ftrace_location(p);
1506         if (ret)
1507                 return ret;
1508         jump_label_lock();
1509         preempt_disable();
1510
1511         /* Ensure it is neither in a reserved area nor outside of kernel text */
1512         if (!kernel_text_address((unsigned long) p->addr) ||
1513             within_kprobe_blacklist((unsigned long) p->addr) ||
1514             jump_label_text_reserved(p->addr, p->addr)) {
1515                 ret = -EINVAL;
1516                 goto out;
1517         }
1518
1519         /* Check if we are probing a module */
1520         *probed_mod = __module_text_address((unsigned long) p->addr);
1521         if (*probed_mod) {
1522                 /*
1523                  * We must hold a refcount of the probed module while updating
1524                  * its code to prohibit unexpected unloading.
1525                  */
1526                 if (unlikely(!try_module_get(*probed_mod))) {
1527                         ret = -ENOENT;
1528                         goto out;
1529                 }
1530
1531                 /*
1532                  * If the module has already freed .init.text, we cannot insert
1533                  * kprobes there.
1534                  */
1535                 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1536                     (*probed_mod)->state != MODULE_STATE_COMING) {
1537                         module_put(*probed_mod);
1538                         *probed_mod = NULL;
1539                         ret = -ENOENT;
1540                 }
1541         }
1542 out:
1543         preempt_enable();
1544         jump_label_unlock();
1545
1546         return ret;
1547 }
1548
1549 int register_kprobe(struct kprobe *p)
1550 {
1551         int ret;
1552         struct kprobe *old_p;
1553         struct module *probed_mod;
1554         kprobe_opcode_t *addr;
1555
1556         /* Adjust probe address from symbol */
1557         addr = kprobe_addr(p);
1558         if (IS_ERR(addr))
1559                 return PTR_ERR(addr);
1560         p->addr = addr;
1561
1562         ret = check_kprobe_rereg(p);
1563         if (ret)
1564                 return ret;
1565
1566         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1567         p->flags &= KPROBE_FLAG_DISABLED;
1568         p->nmissed = 0;
1569         INIT_LIST_HEAD(&p->list);
1570
1571         ret = check_kprobe_address_safe(p, &probed_mod);
1572         if (ret)
1573                 return ret;
1574
1575         mutex_lock(&kprobe_mutex);
1576
1577         old_p = get_kprobe(p->addr);
1578         if (old_p) {
1579                 /* Since this may unoptimize old_p, locking text_mutex. */
1580                 ret = register_aggr_kprobe(old_p, p);
1581                 goto out;
1582         }
1583
1584         cpus_read_lock();
1585         /* Prevent text modification */
1586         mutex_lock(&text_mutex);
1587         ret = prepare_kprobe(p);
1588         mutex_unlock(&text_mutex);
1589         cpus_read_unlock();
1590         if (ret)
1591                 goto out;
1592
1593         INIT_HLIST_NODE(&p->hlist);
1594         hlist_add_head_rcu(&p->hlist,
1595                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1596
1597         if (!kprobes_all_disarmed && !kprobe_disabled(p))
1598                 arm_kprobe(p);
1599
1600         /* Try to optimize kprobe */
1601         try_to_optimize_kprobe(p);
1602 out:
1603         mutex_unlock(&kprobe_mutex);
1604
1605         if (probed_mod)
1606                 module_put(probed_mod);
1607
1608         return ret;
1609 }
1610 EXPORT_SYMBOL_GPL(register_kprobe);
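
/*
 * Illustrative sketch: minimal module-side use of register_kprobe().  The
 * probed symbol, handler name and message below are assumptions made up
 * for the example and are not defined in this file.
 */
#if 0
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %pS\n", p->addr);
        return 0;       /* 0: continue with normal single-stepping */
}

static struct kprobe example_kp = {
        .symbol_name    = "do_sys_open",        /* assumed example symbol */
        .pre_handler    = example_pre_handler,
};

/*
 * register_kprobe(&example_kp) resolves the symbol, validates the address
 * and arms the probe; unregister_kprobe(&example_kp) removes it again.
 */
#endif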
1611
1612 /* Check if all probes on the aggrprobe are disabled */
1613 static int aggr_kprobe_disabled(struct kprobe *ap)
1614 {
1615         struct kprobe *kp;
1616
1617         list_for_each_entry_rcu(kp, &ap->list, list)
1618                 if (!kprobe_disabled(kp))
1619                         /*
1620                          * There is an active probe on the list.
1621                          * We can't disable this ap.
1622                          */
1623                         return 0;
1624
1625         return 1;
1626 }
1627
1628 /* Disable one kprobe: Must be called with kprobe_mutex held */
1629 static struct kprobe *__disable_kprobe(struct kprobe *p)
1630 {
1631         struct kprobe *orig_p;
1632
1633         /* Get an original kprobe for return */
1634         orig_p = __get_valid_kprobe(p);
1635         if (unlikely(orig_p == NULL))
1636                 return NULL;
1637
1638         if (!kprobe_disabled(p)) {
1639                 /* Disable probe if it is a child probe */
1640                 if (p != orig_p)
1641                         p->flags |= KPROBE_FLAG_DISABLED;
1642
1643                 /* Try to disarm and disable this/parent probe */
1644                 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1645                         /*
1646                          * If kprobes_all_disarmed is set, orig_p
1647                          * should have already been disarmed, so
1648                          * skip the unneeded disarming.
1649                          */
1650                         if (!kprobes_all_disarmed)
1651                                 disarm_kprobe(orig_p, true);
1652                         orig_p->flags |= KPROBE_FLAG_DISABLED;
1653                 }
1654         }
1655
1656         return orig_p;
1657 }
1658
1659 /*
1660  * Unregister a kprobe without scheduler synchronization.
1661  */
1662 static int __unregister_kprobe_top(struct kprobe *p)
1663 {
1664         struct kprobe *ap, *list_p;
1665
1666         /* Disable kprobe. This will disarm it if needed. */
1667         ap = __disable_kprobe(p);
1668         if (ap == NULL)
1669                 return -EINVAL;
1670
1671         if (ap == p)
1672                 /*
1673                  * This probe is an independent (and non-optimized) kprobe
1674                  * (not an aggrprobe). Remove from the hash list.
1675                  */
1676                 goto disarmed;
1677
1678         /* The following code expects this probe to be an aggrprobe */
1679         WARN_ON(!kprobe_aggrprobe(ap));
1680
1681         if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1682                 /*
1683                  * !disarmed can happen if the probe is under delayed
1684                  * unoptimizing.
1685                  */
1686                 goto disarmed;
1687         else {
1688                 /* If the probe being disabled has special handlers, update the aggrprobe */
1689                 if (p->break_handler && !kprobe_gone(p))
1690                         ap->break_handler = NULL;
1691                 if (p->post_handler && !kprobe_gone(p)) {
1692                         list_for_each_entry_rcu(list_p, &ap->list, list) {
1693                                 if ((list_p != p) && (list_p->post_handler))
1694                                         goto noclean;
1695                         }
1696                         ap->post_handler = NULL;
1697                 }
1698 noclean:
1699                 /*
1700                  * Remove from the aggrprobe: this path will do nothing in
1701                  * __unregister_kprobe_bottom().
1702                  */
1703                 list_del_rcu(&p->list);
1704                 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1705                         /*
1706                          * Try to optimize this probe again, because post
1707                          * handler may have been changed.
1708                          */
1709                         optimize_kprobe(ap);
1710         }
1711         return 0;
1712
1713 disarmed:
1714         BUG_ON(!kprobe_disarmed(ap));
1715         hlist_del_rcu(&ap->hlist);
1716         return 0;
1717 }
1718
1719 static void __unregister_kprobe_bottom(struct kprobe *p)
1720 {
1721         struct kprobe *ap;
1722
1723         if (list_empty(&p->list))
1724                 /* This is an independent kprobe */
1725                 arch_remove_kprobe(p);
1726         else if (list_is_singular(&p->list)) {
1727                 /* This is the last child of an aggrprobe */
1728                 ap = list_entry(p->list.next, struct kprobe, list);
1729                 list_del(&p->list);
1730                 free_aggr_kprobe(ap);
1731         }
1732         /* Otherwise, do nothing. */
1733 }
1734
1735 int register_kprobes(struct kprobe **kps, int num)
1736 {
1737         int i, ret = 0;
1738
1739         if (num <= 0)
1740                 return -EINVAL;
1741         for (i = 0; i < num; i++) {
1742                 ret = register_kprobe(kps[i]);
1743                 if (ret < 0) {
1744                         if (i > 0)
1745                                 unregister_kprobes(kps, i);
1746                         break;
1747                 }
1748         }
1749         return ret;
1750 }
1751 EXPORT_SYMBOL_GPL(register_kprobes);
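
/*
 * Illustrative note: batch registration rolls back the probes that were
 * already registered when one of them fails, so a caller only needs to
 * check the return value.  kp_a and kp_b below are hypothetical probes.
 */
#if 0
        struct kprobe *my_probes[] = { &kp_a, &kp_b };

        if (register_kprobes(my_probes, ARRAY_SIZE(my_probes)))
                pr_err("batch kprobe registration failed\n");
#endif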
1752
1753 void unregister_kprobe(struct kprobe *p)
1754 {
1755         unregister_kprobes(&p, 1);
1756 }
1757 EXPORT_SYMBOL_GPL(unregister_kprobe);
1758
1759 void unregister_kprobes(struct kprobe **kps, int num)
1760 {
1761         int i;
1762
1763         if (num <= 0)
1764                 return;
1765         mutex_lock(&kprobe_mutex);
1766         for (i = 0; i < num; i++)
1767                 if (__unregister_kprobe_top(kps[i]) < 0)
1768                         kps[i]->addr = NULL;
1769         mutex_unlock(&kprobe_mutex);
1770
1771         synchronize_sched();
1772         for (i = 0; i < num; i++)
1773                 if (kps[i]->addr)
1774                         __unregister_kprobe_bottom(kps[i]);
1775 }
1776 EXPORT_SYMBOL_GPL(unregister_kprobes);
1777
1778 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1779                                         unsigned long val, void *data)
1780 {
1781         return NOTIFY_DONE;
1782 }
1783 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1784
1785 static struct notifier_block kprobe_exceptions_nb = {
1786         .notifier_call = kprobe_exceptions_notify,
1787         .priority = 0x7fffffff /* we need to be notified first */
1788 };
1789
1790 unsigned long __weak arch_deref_entry_point(void *entry)
1791 {
1792         return (unsigned long)entry;
1793 }
1794
1795 #if 0
1796 int register_jprobes(struct jprobe **jps, int num)
1797 {
1798         int ret = 0, i;
1799
1800         if (num <= 0)
1801                 return -EINVAL;
1802
1803         for (i = 0; i < num; i++) {
1804                 ret = register_jprobe(jps[i]);
1805
1806                 if (ret < 0) {
1807                         if (i > 0)
1808                                 unregister_jprobes(jps, i);
1809                         break;
1810                 }
1811         }
1812
1813         return ret;
1814 }
1815 EXPORT_SYMBOL_GPL(register_jprobes);
1816
1817 int register_jprobe(struct jprobe *jp)
1818 {
1819         unsigned long addr, offset;
1820         struct kprobe *kp = &jp->kp;
1821
1822         /*
1823          * Verify probepoint as well as the jprobe handler are
1824          * valid function entry points.
1825          */
1826         addr = arch_deref_entry_point(jp->entry);
1827
1828         if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1829             kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
1830                 kp->pre_handler = setjmp_pre_handler;
1831                 kp->break_handler = longjmp_break_handler;
1832                 return register_kprobe(kp);
1833         }
1834
1835         return -EINVAL;
1836 }
1837 EXPORT_SYMBOL_GPL(register_jprobe);
1838
1839 void unregister_jprobe(struct jprobe *jp)
1840 {
1841         unregister_jprobes(&jp, 1);
1842 }
1843 EXPORT_SYMBOL_GPL(unregister_jprobe);
1844
1845 void unregister_jprobes(struct jprobe **jps, int num)
1846 {
1847         int i;
1848
1849         if (num <= 0)
1850                 return;
1851         mutex_lock(&kprobe_mutex);
1852         for (i = 0; i < num; i++)
1853                 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1854                         jps[i]->kp.addr = NULL;
1855         mutex_unlock(&kprobe_mutex);
1856
1857         synchronize_sched();
1858         for (i = 0; i < num; i++) {
1859                 if (jps[i]->kp.addr)
1860                         __unregister_kprobe_bottom(&jps[i]->kp);
1861         }
1862 }
1863 EXPORT_SYMBOL_GPL(unregister_jprobes);
1864 #endif
1865
1866 #ifdef CONFIG_KRETPROBES
1867 /*
1868  * This kprobe pre_handler is registered with every kretprobe. When the
1869  * probe hits, it sets up the return probe.
1870  */
1871 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1872 {
1873         struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1874         unsigned long hash, flags = 0;
1875         struct kretprobe_instance *ri;
1876
1877         /*
1878          * To avoid deadlocks, prohibit return probing in NMI contexts,
1879          * just skip the probe and increase the (inexact) 'nmissed'
1880          * statistical counter, so that the user is informed that
1881          * something happened:
1882          */
1883         if (unlikely(in_nmi())) {
1884                 rp->nmissed++;
1885                 return 0;
1886         }
1887
1888         /* TODO: consider swapping the RA only after the last pre_handler has fired */
1889         hash = hash_ptr(current, KPROBE_HASH_BITS);
1890         raw_spin_lock_irqsave(&rp->lock, flags);
1891         if (!hlist_empty(&rp->free_instances)) {
1892                 ri = hlist_entry(rp->free_instances.first,
1893                                 struct kretprobe_instance, hlist);
1894                 hlist_del(&ri->hlist);
1895                 raw_spin_unlock_irqrestore(&rp->lock, flags);
1896
1897                 ri->rp = rp;
1898                 ri->task = current;
1899
1900                 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1901                         raw_spin_lock_irqsave(&rp->lock, flags);
1902                         hlist_add_head(&ri->hlist, &rp->free_instances);
1903                         raw_spin_unlock_irqrestore(&rp->lock, flags);
1904                         return 0;
1905                 }
1906
1907                 arch_prepare_kretprobe(ri, regs);
1908
1909                 /* XXX(hch): why is there no hlist_move_head? */
1910                 INIT_HLIST_NODE(&ri->hlist);
1911                 kretprobe_table_lock(hash, &flags);
1912                 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1913                 kretprobe_table_unlock(hash, &flags);
1914         } else {
1915                 rp->nmissed++;
1916                 raw_spin_unlock_irqrestore(&rp->lock, flags);
1917         }
1918         return 0;
1919 }
1920 NOKPROBE_SYMBOL(pre_handler_kretprobe);
1921
1922 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1923 {
1924         return !offset;
1925 }
1926
1927 bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1928 {
1929         kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1930
1931         if (IS_ERR(kp_addr))
1932                 return false;
1933
1934         if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1935                                                 !arch_kprobe_on_func_entry(offset))
1936                 return false;
1937
1938         return true;
1939 }
1940
1941 int register_kretprobe(struct kretprobe *rp)
1942 {
1943         int ret = 0;
1944         struct kretprobe_instance *inst;
1945         int i;
1946         void *addr;
1947
1948         if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1949                 return -EINVAL;
1950
1951         if (kretprobe_blacklist_size) {
1952                 addr = kprobe_addr(&rp->kp);
1953                 if (IS_ERR(addr))
1954                         return PTR_ERR(addr);
1955
1956                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1957                         if (kretprobe_blacklist[i].addr == addr)
1958                                 return -EINVAL;
1959                 }
1960         }
1961
1962         rp->kp.pre_handler = pre_handler_kretprobe;
1963         rp->kp.post_handler = NULL;
1964         rp->kp.fault_handler = NULL;
1965         rp->kp.break_handler = NULL;
1966
1967         /* Pre-allocate memory for max kretprobe instances */
1968         if (rp->maxactive <= 0) {
1969 #ifdef CONFIG_PREEMPT
1970                 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1971 #else
1972                 rp->maxactive = num_possible_cpus();
1973 #endif
1974         }
1975         raw_spin_lock_init(&rp->lock);
1976         INIT_HLIST_HEAD(&rp->free_instances);
1977         for (i = 0; i < rp->maxactive; i++) {
1978                 inst = kmalloc(sizeof(struct kretprobe_instance) +
1979                                rp->data_size, GFP_KERNEL);
1980                 if (inst == NULL) {
1981                         free_rp_inst(rp);
1982                         return -ENOMEM;
1983                 }
1984                 INIT_HLIST_NODE(&inst->hlist);
1985                 hlist_add_head(&inst->hlist, &rp->free_instances);
1986         }
1987
1988         rp->nmissed = 0;
1989         /* Establish function entry probe point */
1990         ret = register_kprobe(&rp->kp);
1991         if (ret != 0)
1992                 free_rp_inst(rp);
1993         return ret;
1994 }
1995 EXPORT_SYMBOL_GPL(register_kretprobe);
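
/*
 * Illustrative sketch: a kretprobe that reports the return value of a
 * probed function.  The symbol name, handler and maxactive value are
 * assumptions made up for the example.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
                               struct pt_regs *regs)
{
        pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
                regs_return_value(regs));
        return 0;
}

static struct kretprobe example_rp = {
        .kp.symbol_name = "_do_fork",           /* assumed example symbol */
        .handler        = example_ret_handler,
        .maxactive      = 20,   /* instances pre-allocated for concurrent hits */
};

/*
 * register_kretprobe(&example_rp) plants the entry kprobe handled by
 * pre_handler_kretprobe() above; unregister_kretprobe() removes it and
 * releases the pre-allocated instances.
 */
#endif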
1996
1997 int register_kretprobes(struct kretprobe **rps, int num)
1998 {
1999         int ret = 0, i;
2000
2001         if (num <= 0)
2002                 return -EINVAL;
2003         for (i = 0; i < num; i++) {
2004                 ret = register_kretprobe(rps[i]);
2005                 if (ret < 0) {
2006                         if (i > 0)
2007                                 unregister_kretprobes(rps, i);
2008                         break;
2009                 }
2010         }
2011         return ret;
2012 }
2013 EXPORT_SYMBOL_GPL(register_kretprobes);
2014
2015 void unregister_kretprobe(struct kretprobe *rp)
2016 {
2017         unregister_kretprobes(&rp, 1);
2018 }
2019 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2020
2021 void unregister_kretprobes(struct kretprobe **rps, int num)
2022 {
2023         int i;
2024
2025         if (num <= 0)
2026                 return;
2027         mutex_lock(&kprobe_mutex);
2028         for (i = 0; i < num; i++)
2029                 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2030                         rps[i]->kp.addr = NULL;
2031         mutex_unlock(&kprobe_mutex);
2032
2033         synchronize_sched();
2034         for (i = 0; i < num; i++) {
2035                 if (rps[i]->kp.addr) {
2036                         __unregister_kprobe_bottom(&rps[i]->kp);
2037                         cleanup_rp_inst(rps[i]);
2038                 }
2039         }
2040 }
2041 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2042
2043 #else /* CONFIG_KRETPROBES */
2044 int register_kretprobe(struct kretprobe *rp)
2045 {
2046         return -ENOSYS;
2047 }
2048 EXPORT_SYMBOL_GPL(register_kretprobe);
2049
2050 int register_kretprobes(struct kretprobe **rps, int num)
2051 {
2052         return -ENOSYS;
2053 }
2054 EXPORT_SYMBOL_GPL(register_kretprobes);
2055
2056 void unregister_kretprobe(struct kretprobe *rp)
2057 {
2058 }
2059 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2060
2061 void unregister_kretprobes(struct kretprobe **rps, int num)
2062 {
2063 }
2064 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2065
2066 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2067 {
2068         return 0;
2069 }
2070 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2071
2072 #endif /* CONFIG_KRETPROBES */
2073
2074 /* Mark the kprobe as gone and remove its instruction buffer. */
2075 static void kill_kprobe(struct kprobe *p)
2076 {
2077         struct kprobe *kp;
2078
2079         p->flags |= KPROBE_FLAG_GONE;
2080         if (kprobe_aggrprobe(p)) {
2081                 /*
2082                  * If this is an aggr_kprobe, we have to walk all the
2083                  * chained probes and mark them GONE.
2084                  */
2085                 list_for_each_entry_rcu(kp, &p->list, list)
2086                         kp->flags |= KPROBE_FLAG_GONE;
2087                 p->post_handler = NULL;
2088                 p->break_handler = NULL;
2089                 kill_optimized_kprobe(p);
2090         }
2091         /*
2092          * Here, we can remove insn_slot safely, because no thread calls
2093          * the original probed function (which will be freed soon) any more.
2094          */
2095         arch_remove_kprobe(p);
2096 }
2097
2098 /* Disable one kprobe */
2099 int disable_kprobe(struct kprobe *kp)
2100 {
2101         int ret = 0;
2102
2103         mutex_lock(&kprobe_mutex);
2104
2105         /* Disable this kprobe */
2106         if (__disable_kprobe(kp) == NULL)
2107                 ret = -EINVAL;
2108
2109         mutex_unlock(&kprobe_mutex);
2110         return ret;
2111 }
2112 EXPORT_SYMBOL_GPL(disable_kprobe);
2113
2114 /* Enable one kprobe */
2115 int enable_kprobe(struct kprobe *kp)
2116 {
2117         int ret = 0;
2118         struct kprobe *p;
2119
2120         mutex_lock(&kprobe_mutex);
2121
2122         /* Check whether specified probe is valid. */
2123         p = __get_valid_kprobe(kp);
2124         if (unlikely(p == NULL)) {
2125                 ret = -EINVAL;
2126                 goto out;
2127         }
2128
2129         if (kprobe_gone(kp)) {
2130                 /* This kprobe has gone; we can't enable it. */
2131                 ret = -EINVAL;
2132                 goto out;
2133         }
2134
2135         if (p != kp)
2136                 kp->flags &= ~KPROBE_FLAG_DISABLED;
2137
2138         if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2139                 p->flags &= ~KPROBE_FLAG_DISABLED;
2140                 arm_kprobe(p);
2141         }
2142 out:
2143         mutex_unlock(&kprobe_mutex);
2144         return ret;
2145 }
2146 EXPORT_SYMBOL_GPL(enable_kprobe);
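
/*
 * Illustrative usage, assuming a hypothetical registered probe example_kp:
 * a probe can be parked and resumed without unregistering it.
 */
#if 0
        disable_kprobe(&example_kp);    /* stays registered, but disarmed */
        enable_kprobe(&example_kp);     /* re-armed unless the probe has gone */
#endif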
2147
2148 void dump_kprobe(struct kprobe *kp)
2149 {
2150         printk(KERN_WARNING "Dumping kprobe:\n");
2151         printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2152                kp->symbol_name, kp->addr, kp->offset);
2153 }
2154 NOKPROBE_SYMBOL(dump_kprobe);
2155
2156 /*
2157  * Lookup and populate the kprobe_blacklist.
2158  *
2159  * Unlike the kretprobe blacklist, we'll need to determine
2160  * the range of addresses that belong to these functions,
2161  * since a kprobe need not necessarily be at the beginning
2162  * of a function.
2163  */
2164 static int __init populate_kprobe_blacklist(unsigned long *start,
2165                                              unsigned long *end)
2166 {
2167         unsigned long *iter;
2168         struct kprobe_blacklist_entry *ent;
2169         unsigned long entry, offset = 0, size = 0;
2170
2171         for (iter = start; iter < end; iter++) {
2172                 entry = arch_deref_entry_point((void *)*iter);
2173
2174                 if (!kernel_text_address(entry) ||
2175                     !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2176                         pr_err("Failed to find blacklist at %p\n",
2177                                 (void *)entry);
2178                         continue;
2179                 }
2180
2181                 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2182                 if (!ent)
2183                         return -ENOMEM;
2184                 ent->start_addr = entry;
2185                 ent->end_addr = entry + size;
2186                 INIT_LIST_HEAD(&ent->list);
2187                 list_add_tail(&ent->list, &kprobe_blacklist);
2188         }
2189         return 0;
2190 }
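
/*
 * Illustrative note: the _kprobe_blacklist section walked above is normally
 * populated through NOKPROBE_SYMBOL() annotations, for example:
 *
 *      static void critical_helper(void) { ... }
 *      NOKPROBE_SYMBOL(critical_helper);
 *
 * critical_helper is a made-up name; any function annotated this way ends up
 * between __start_kprobe_blacklist and __stop_kprobe_blacklist.
 */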
2191
2192 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
2193 /* Markers of the _kprobe_error_inject_list section */
2194 extern unsigned long __start_kprobe_error_inject_list[];
2195 extern unsigned long __stop_kprobe_error_inject_list[];
2196
2197 /*
2198  * Lookup and populate the kprobe_error_injection_list.
2199  *
2200  * For safety reasons we only allow certain functions to be overridden with
2201  * bpf_error_injection, so we need to populate the list of symbols that have
2202  * been marked as safe for overriding.
2203  */
2204 static void populate_kprobe_error_injection_list(unsigned long *start,
2205                                                  unsigned long *end,
2206                                                  void *priv)
2207 {
2208         unsigned long *iter;
2209         struct kprobe_ei_entry *ent;
2210         unsigned long entry, offset = 0, size = 0;
2211
2212         mutex_lock(&kprobe_ei_mutex);
2213         for (iter = start; iter < end; iter++) {
2214                 entry = arch_deref_entry_point((void *)*iter);
2215
2216                 if (!kernel_text_address(entry) ||
2217                     !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2218                         pr_err("Failed to find error inject entry at %p\n",
2219                                 (void *)entry);
2220                         continue;
2221                 }
2222
2223                 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2224                 if (!ent)
2225                         break;
2226                 ent->start_addr = entry;
2227                 ent->end_addr = entry + size;
2228                 ent->priv = priv;
2229                 INIT_LIST_HEAD(&ent->list);
2230                 list_add_tail(&ent->list, &kprobe_error_injection_list);
2231         }
2232         mutex_unlock(&kprobe_ei_mutex);
2233 }
2234
2235 static void __init populate_kernel_kprobe_ei_list(void)
2236 {
2237         populate_kprobe_error_injection_list(__start_kprobe_error_inject_list,
2238                                              __stop_kprobe_error_inject_list,
2239                                              NULL);
2240 }
2241
2242 static void module_load_kprobe_ei_list(struct module *mod)
2243 {
2244         if (!mod->num_kprobe_ei_funcs)
2245                 return;
2246         populate_kprobe_error_injection_list(mod->kprobe_ei_funcs,
2247                                              mod->kprobe_ei_funcs +
2248                                              mod->num_kprobe_ei_funcs, mod);
2249 }
2250
2251 static void module_unload_kprobe_ei_list(struct module *mod)
2252 {
2253         struct kprobe_ei_entry *ent, *n;
2254         if (!mod->num_kprobe_ei_funcs)
2255                 return;
2256
2257         mutex_lock(&kprobe_ei_mutex);
2258         list_for_each_entry_safe(ent, n, &kprobe_error_injection_list, list) {
2259                 if (ent->priv == mod) {
2260                         list_del_init(&ent->list);
2261                         kfree(ent);
2262                 }
2263         }
2264         mutex_unlock(&kprobe_ei_mutex);
2265 }
2266 #else
2267 static inline void __init populate_kernel_kprobe_ei_list(void) {}
2268 static inline void module_load_kprobe_ei_list(struct module *m) {}
2269 static inline void module_unload_kprobe_ei_list(struct module *m) {}
2270 #endif
2271
2272 /* Module notifier callback, checking kprobes on the module */
2273 static int kprobes_module_callback(struct notifier_block *nb,
2274                                    unsigned long val, void *data)
2275 {
2276         struct module *mod = data;
2277         struct hlist_head *head;
2278         struct kprobe *p;
2279         unsigned int i;
2280         int checkcore = (val == MODULE_STATE_GOING);
2281
2282         if (val == MODULE_STATE_COMING)
2283                 module_load_kprobe_ei_list(mod);
2284         else if (val == MODULE_STATE_GOING)
2285                 module_unload_kprobe_ei_list(mod);
2286
2287         if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2288                 return NOTIFY_DONE;
2289
2290         /*
2291          * When MODULE_STATE_GOING is notified, both the module's .text and
2292          * .init.text sections will be freed. When MODULE_STATE_LIVE is
2293          * notified, only the .init.text section is freed. We need to
2294          * disable the kprobes that have been inserted in those sections.
2295          */
2296         mutex_lock(&kprobe_mutex);
2297         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2298                 head = &kprobe_table[i];
2299                 hlist_for_each_entry_rcu(p, head, hlist)
2300                         if (within_module_init((unsigned long)p->addr, mod) ||
2301                             (checkcore &&
2302                              within_module_core((unsigned long)p->addr, mod))) {
2303                                 /*
2304                                  * The vaddr this probe is installed at will
2305                                  * soon be vfreed but not synced to disk. Hence,
2306                                  * disarming the breakpoint isn't needed.
2307                                  *
2308                                  * Note, this will also move any optimized probes
2309                                  * that are pending to be removed from their
2310                                  * corresponding lists to the freeing_list and
2311                                  * will not be touched by the delayed
2312                                  * kprobe_optimizer work handler.
2313                                  */
2314                                 kill_kprobe(p);
2315                         }
2316         }
2317         mutex_unlock(&kprobe_mutex);
2318         return NOTIFY_DONE;
2319 }
2320
2321 static struct notifier_block kprobe_module_nb = {
2322         .notifier_call = kprobes_module_callback,
2323         .priority = 0
2324 };
2325
2326 /* Markers of _kprobe_blacklist section */
2327 extern unsigned long __start_kprobe_blacklist[];
2328 extern unsigned long __stop_kprobe_blacklist[];
2329
2330 static int __init init_kprobes(void)
2331 {
2332         int i, err = 0;
2333
2334         /* FIXME allocate the probe table, currently defined statically */
2335         /* initialize all list heads */
2336         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2337                 INIT_HLIST_HEAD(&kprobe_table[i]);
2338                 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2339                 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2340         }
2341
2342         err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2343                                         __stop_kprobe_blacklist);
2344         if (err) {
2345                 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2346                 pr_err("Please take care when using kprobes.\n");
2347         }
2348
2349         populate_kernel_kprobe_ei_list();
2350
2351         if (kretprobe_blacklist_size) {
2352                 /* lookup the function address from its name */
2353                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2354                         kretprobe_blacklist[i].addr =
2355                                 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2356                         if (!kretprobe_blacklist[i].addr)
2357                                 printk("kretprobe: lookup failed: %s\n",
2358                                        kretprobe_blacklist[i].name);
2359                 }
2360         }
2361
2362 #if defined(CONFIG_OPTPROBES)
2363 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2364         /* Init kprobe_optinsn_slots */
2365         kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2366 #endif
2367         /* By default, kprobes can be optimized */
2368         kprobes_allow_optimization = true;
2369 #endif
2370
2371         /* By default, kprobes are armed */
2372         kprobes_all_disarmed = false;
2373
2374         err = arch_init_kprobes();
2375         if (!err)
2376                 err = register_die_notifier(&kprobe_exceptions_nb);
2377         if (!err)
2378                 err = register_module_notifier(&kprobe_module_nb);
2379
2380         kprobes_initialized = (err == 0);
2381
2382         if (!err)
2383                 init_test_probes();
2384         return err;
2385 }
2386
2387 #ifdef CONFIG_DEBUG_FS
2388 static void report_probe(struct seq_file *pi, struct kprobe *p,
2389                 const char *sym, int offset, char *modname, struct kprobe *pp)
2390 {
2391         char *kprobe_type;
2392
2393         if (p->pre_handler == pre_handler_kretprobe)
2394                 kprobe_type = "r";
2395         else if (p->pre_handler == setjmp_pre_handler)
2396                 kprobe_type = "j";
2397         else
2398                 kprobe_type = "k";
2399
2400         if (sym)
2401                 seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2402                         p->addr, kprobe_type, sym, offset,
2403                         (modname ? modname : " "));
2404         else
2405                 seq_printf(pi, "%p  %s  %p ",
2406                         p->addr, kprobe_type, p->addr);
2407
2408         if (!pp)
2409                 pp = p;
2410         seq_printf(pi, "%s%s%s%s\n",
2411                 (kprobe_gone(p) ? "[GONE]" : ""),
2412                 ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2413                 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2414                 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2415 }
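
/*
 * Example of the resulting kprobes/list output; addresses and symbols are
 * illustrative only (the %p values are typically hashed on recent kernels):
 *
 *      0000000012345678  k  do_sys_open+0x0    [DISABLED]
 *      00000000deadbeef  r  _do_fork+0x0
 */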
2416
2417 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2418 {
2419         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2420 }
2421
2422 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2423 {
2424         (*pos)++;
2425         if (*pos >= KPROBE_TABLE_SIZE)
2426                 return NULL;
2427         return pos;
2428 }
2429
2430 static void kprobe_seq_stop(struct seq_file *f, void *v)
2431 {
2432         /* Nothing to do */
2433 }
2434
2435 static int show_kprobe_addr(struct seq_file *pi, void *v)
2436 {
2437         struct hlist_head *head;
2438         struct kprobe *p, *kp;
2439         const char *sym = NULL;
2440         unsigned int i = *(loff_t *) v;
2441         unsigned long offset = 0;
2442         char *modname, namebuf[KSYM_NAME_LEN];
2443
2444         head = &kprobe_table[i];
2445         preempt_disable();
2446         hlist_for_each_entry_rcu(p, head, hlist) {
2447                 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2448                                         &offset, &modname, namebuf);
2449                 if (kprobe_aggrprobe(p)) {
2450                         list_for_each_entry_rcu(kp, &p->list, list)
2451                                 report_probe(pi, kp, sym, offset, modname, p);
2452                 } else
2453                         report_probe(pi, p, sym, offset, modname, NULL);
2454         }
2455         preempt_enable();
2456         return 0;
2457 }
2458
2459 static const struct seq_operations kprobes_seq_ops = {
2460         .start = kprobe_seq_start,
2461         .next  = kprobe_seq_next,
2462         .stop  = kprobe_seq_stop,
2463         .show  = show_kprobe_addr
2464 };
2465
2466 static int kprobes_open(struct inode *inode, struct file *filp)
2467 {
2468         return seq_open(filp, &kprobes_seq_ops);
2469 }
2470
2471 static const struct file_operations debugfs_kprobes_operations = {
2472         .open           = kprobes_open,
2473         .read           = seq_read,
2474         .llseek         = seq_lseek,
2475         .release        = seq_release,
2476 };
2477
2478 /* kprobes/blacklist -- shows which functions cannot be probed */
2479 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2480 {
2481         return seq_list_start(&kprobe_blacklist, *pos);
2482 }
2483
2484 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2485 {
2486         return seq_list_next(v, &kprobe_blacklist, pos);
2487 }
2488
2489 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2490 {
2491         struct kprobe_blacklist_entry *ent =
2492                 list_entry(v, struct kprobe_blacklist_entry, list);
2493
2494         seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2495                    (void *)ent->end_addr, (void *)ent->start_addr);
2496         return 0;
2497 }
2498
2499 static const struct seq_operations kprobe_blacklist_seq_ops = {
2500         .start = kprobe_blacklist_seq_start,
2501         .next  = kprobe_blacklist_seq_next,
2502         .stop  = kprobe_seq_stop,       /* Reuse void function */
2503         .show  = kprobe_blacklist_seq_show,
2504 };
2505
2506 static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2507 {
2508         return seq_open(filp, &kprobe_blacklist_seq_ops);
2509 }
2510
2511 static const struct file_operations debugfs_kprobe_blacklist_ops = {
2512         .open           = kprobe_blacklist_open,
2513         .read           = seq_read,
2514         .llseek         = seq_lseek,
2515         .release        = seq_release,
2516 };
2517
2518 /*
2519  * kprobes/error_injection_list -- shows which functions can be overridden
2520  * for error injection.
2521  */
2522 static void *kprobe_ei_seq_start(struct seq_file *m, loff_t *pos)
2523 {
2524         mutex_lock(&kprobe_ei_mutex);
2525         return seq_list_start(&kprobe_error_injection_list, *pos);
2526 }
2527
2528 static void kprobe_ei_seq_stop(struct seq_file *m, void *v)
2529 {
2530         mutex_unlock(&kprobe_ei_mutex);
2531 }
2532
2533 static void *kprobe_ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
2534 {
2535         return seq_list_next(v, &kprobe_error_injection_list, pos);
2536 }
2537
2538 static int kprobe_ei_seq_show(struct seq_file *m, void *v)
2539 {
2540         char buffer[KSYM_SYMBOL_LEN];
2541         struct kprobe_ei_entry *ent =
2542                 list_entry(v, struct kprobe_ei_entry, list);
2543
2544         sprint_symbol(buffer, ent->start_addr);
2545         seq_printf(m, "%s\n", buffer);
2546         return 0;
2547 }
2548
2549 static const struct seq_operations kprobe_ei_seq_ops = {
2550         .start = kprobe_ei_seq_start,
2551         .next  = kprobe_ei_seq_next,
2552         .stop  = kprobe_ei_seq_stop,
2553         .show  = kprobe_ei_seq_show,
2554 };
2555
2556 static int kprobe_ei_open(struct inode *inode, struct file *filp)
2557 {
2558         return seq_open(filp, &kprobe_ei_seq_ops);
2559 }
2560
2561 static const struct file_operations debugfs_kprobe_ei_ops = {
2562         .open           = kprobe_ei_open,
2563         .read           = seq_read,
2564         .llseek         = seq_lseek,
2565         .release        = seq_release,
2566 };
2567
2568 static void arm_all_kprobes(void)
2569 {
2570         struct hlist_head *head;
2571         struct kprobe *p;
2572         unsigned int i;
2573
2574         mutex_lock(&kprobe_mutex);
2575
2576         /* If kprobes are armed, just return */
2577         if (!kprobes_all_disarmed)
2578                 goto already_enabled;
2579
2580         /*
2581          * optimize_kprobe() called by arm_kprobe() checks
2582          * kprobes_all_disarmed, so set kprobes_all_disarmed before
2583          * arm_kprobe.
2584          */
2585         kprobes_all_disarmed = false;
2586         /* Arming kprobes doesn't optimize kprobe itself */
2587         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2588                 head = &kprobe_table[i];
2589                 hlist_for_each_entry_rcu(p, head, hlist)
2590                         if (!kprobe_disabled(p))
2591                                 arm_kprobe(p);
2592         }
2593
2594         printk(KERN_INFO "Kprobes globally enabled\n");
2595
2596 already_enabled:
2597         mutex_unlock(&kprobe_mutex);
2598         return;
2599 }
2600
2601 static void disarm_all_kprobes(void)
2602 {
2603         struct hlist_head *head;
2604         struct kprobe *p;
2605         unsigned int i;
2606
2607         mutex_lock(&kprobe_mutex);
2608
2609         /* If kprobes are already disarmed, just return */
2610         if (kprobes_all_disarmed) {
2611                 mutex_unlock(&kprobe_mutex);
2612                 return;
2613         }
2614
2615         kprobes_all_disarmed = true;
2616         printk(KERN_INFO "Kprobes globally disabled\n");
2617
2618         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2619                 head = &kprobe_table[i];
2620                 hlist_for_each_entry_rcu(p, head, hlist) {
2621                         if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2622                                 disarm_kprobe(p, false);
2623                 }
2624         }
2625         mutex_unlock(&kprobe_mutex);
2626
2627         /* Wait for the optimizer to finish disarming all kprobes */
2628         wait_for_kprobe_optimizer();
2629 }
2630
2631 /*
2632  * XXX: The debugfs bool file interface doesn't allow for callbacks
2633  * when the bool state is switched. We can reuse that facility when
2634  * available.
2635  */
2636 static ssize_t read_enabled_file_bool(struct file *file,
2637                char __user *user_buf, size_t count, loff_t *ppos)
2638 {
2639         char buf[3];
2640
2641         if (!kprobes_all_disarmed)
2642                 buf[0] = '1';
2643         else
2644                 buf[0] = '0';
2645         buf[1] = '\n';
2646         buf[2] = 0x00;
2647         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2648 }
2649
2650 static ssize_t write_enabled_file_bool(struct file *file,
2651                const char __user *user_buf, size_t count, loff_t *ppos)
2652 {
2653         char buf[32];
2654         size_t buf_size;
2655
2656         buf_size = min(count, (sizeof(buf)-1));
2657         if (copy_from_user(buf, user_buf, buf_size))
2658                 return -EFAULT;
2659
2660         buf[buf_size] = '\0';
2661         switch (buf[0]) {
2662         case 'y':
2663         case 'Y':
2664         case '1':
2665                 arm_all_kprobes();
2666                 break;
2667         case 'n':
2668         case 'N':
2669         case '0':
2670                 disarm_all_kprobes();
2671                 break;
2672         default:
2673                 return -EINVAL;
2674         }
2675
2676         return count;
2677 }
2678
2679 static const struct file_operations fops_kp = {
2680         .read =         read_enabled_file_bool,
2681         .write =        write_enabled_file_bool,
2682         .llseek =       default_llseek,
2683 };
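
/*
 * Usage note (illustrative): with debugfs mounted at /sys/kernel/debug, the
 * "enabled" file created below lets user space toggle global arming:
 *
 *      echo 0 > /sys/kernel/debug/kprobes/enabled      (disarm all kprobes)
 *      echo 1 > /sys/kernel/debug/kprobes/enabled      (re-arm them)
 */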
2684
2685 static int __init debugfs_kprobe_init(void)
2686 {
2687         struct dentry *dir, *file;
2688         unsigned int value = 1;
2689
2690         dir = debugfs_create_dir("kprobes", NULL);
2691         if (!dir)
2692                 return -ENOMEM;
2693
2694         file = debugfs_create_file("list", 0444, dir, NULL,
2695                                 &debugfs_kprobes_operations);
2696         if (!file)
2697                 goto error;
2698
2699         file = debugfs_create_file("enabled", 0600, dir,
2700                                         &value, &fops_kp);
2701         if (!file)
2702                 goto error;
2703
2704         file = debugfs_create_file("blacklist", 0444, dir, NULL,
2705                                 &debugfs_kprobe_blacklist_ops);
2706         if (!file)
2707                 goto error;
2708
2709         file = debugfs_create_file("error_injection_list", 0444, dir, NULL,
2710                                   &debugfs_kprobe_ei_ops);
2711         if (!file)
2712                 goto error;
2713
2714         return 0;
2715
2716 error:
2717         debugfs_remove(dir);
2718         return -ENOMEM;
2719 }
2720
2721 late_initcall(debugfs_kprobe_init);
2722 #endif /* CONFIG_DEBUG_FS */
2723
2724 module_init(init_kprobes);
2725
2726 /* defined in arch/.../kernel/kprobes.c */
2727 EXPORT_SYMBOL_GPL(jprobe_return);