/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

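/*
 * Usage sketch (illustrative, not part of the implementation): on most
 * architectures the default definition above simply resolves a symbol
 * name to its text address via kallsyms. The symbol name below is an
 * example only:
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return -EINVAL;	(symbol unknown, registration would fail)
 */
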
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);      /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
        return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
        {"preempt_schedule",},
        {"native_get_debugreg",},
        {"irq_entries_start",},
        {"common_interrupt",},
        {NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support, and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

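/*
 * Worked example, assuming typical x86 values (PAGE_SIZE = 4096,
 * MAX_INSN_SIZE = 16, sizeof(kprobe_opcode_t) = 1):
 *
 *	INSNS_PER_PAGE = 4096 / (16 * 1) = 256 instruction slots per page
 */
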
struct kprobe_insn_page {
        struct list_head list;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk(KERN_ERR "Check failed: %s is running\n",
                                       p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
        struct kprobe_insn_page *kip;

 retry:
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0)
                goto retry;

        /* All out of space.  Need to allocate a new page. Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_LIST_HEAD(&kip->list);
        list_add(&kip->list, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        kprobe_opcode_t *ret;
        mutex_lock(&kprobe_insn_mutex);
        ret = __get_insn_slot();
        mutex_unlock(&kprobe_insn_mutex);
        return ret;
}

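/*
 * Usage sketch: architecture code typically grabs a slot while preparing
 * a probe and copies the original instruction into it. Simplified from
 * arch code (details vary per architecture):
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr,
 *	       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 */
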
/* Return 1 if the slot's page is no longer in use, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kprobe_insn_pages)) {
                        list_del(&kip->list);
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip, *next;

        /* Ensure no one is preempted while stepping on a garbage slot */
        if (check_safety())
                return -EAGAIN;

        list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;

        mutex_lock(&kprobe_insn_mutex);
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else
                                collect_one_slot(kip, i);
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();

        mutex_unlock(&kprobe_insn_mutex);
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}

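/*
 * Usage sketch: a caller running with preemption disabled can check
 * whether an address is probed without taking kprobe_mutex:
 *
 *	preempt_disable();
 *	if (get_kprobe(addr))
 *		(a probe is installed at addr)
 *	preempt_enable();
 */
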
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_arm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_disarm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * If we faulted "during" the execution of a user-specified
         * probe handler, invoke just that probe's fault handler.
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;

        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                                struct hlist_head *head)
{
        struct kretprobe *rp = ri->rp;

        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
                spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
                spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
        unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}

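/*
 * Usage sketch: the per-task hash keeps lock contention down. A typical
 * traversal of the current task's return-probe instances looks like:
 *
 *	struct hlist_head *head;
 *	unsigned long flags;
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	(walk head, which holds only current's instances)
 *	kretprobe_hash_unlock(current, &flags);
 */
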
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long hash, flags = 0;

        if (unlikely(!kprobes_initialized))
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;

        /* empty_rp must be initialized before recycle_rp_inst() may add to it */
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
        unsigned long flags, hash;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;
        struct hlist_head *head;

        /* No race here: the kretprobe has already been unregistered */
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
                hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
                        if (ri->rp == rp)
                                ri->rp = NULL;
                }
                kretprobe_table_unlock(hash, &flags);
        }
        free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
        if (p->break_handler) {
                if (ap->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &ap->list);
                ap->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;

        if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
                ap->flags &= ~KPROBE_FLAG_DISABLED;
                if (!kprobes_all_disarmed)
                        /* Arm the breakpoint again. */
                        arm_kprobe(ap);
        }
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->flags = p->flags;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        /* We don't care about a kprobe which has gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
        if (p->break_handler && !kprobe_gone(p))
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap = old_p;

        if (old_p->pre_handler != aggr_pre_handler) {
                /* If old_p is not an aggr_probe, create new aggr_kprobe. */
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
        }

        if (kprobe_gone(ap)) {
                /*
                 * We are attempting to insert a new probe at the same
                 * location that had a probe in the module vaddr area
                 * which was already freed. So, the instruction slot has
                 * already been released. We need a new slot for the new probe.
                 */
                ret = arch_prepare_kprobe(ap);
                if (ret)
                        /*
                         * Even if we fail to allocate a new slot, we don't
                         * need to free the aggr_kprobe. It will be used next
                         * time, or freed by unregister_kprobe.
                         */
                        return ret;

                /*
                 * Clear the gone flag to prevent allocating a new slot again,
                 * and set the disabled flag because it is not armed yet.
                 */
                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
                            | KPROBE_FLAG_DISABLED;
        }

        copy_kprobe(ap, p);
        return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (!kprobe_disabled(kp))
                        /*
                         * There is an active probe on the list.
                         * We can't disable aggr_kprobe.
                         */
                        return 0;
        }
        p->flags |= KPROBE_FLAG_DISABLED;
        return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
        struct kprobe_blackpoint *kb;

        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        /*
         * If there exists a kprobe_blacklist, verify and
         * fail any probe registration in the prohibited area
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                if (kb->start_addr) {
                        if (addr >= kb->start_addr &&
                            addr < (kb->start_addr + kb->range))
                                return -EINVAL;
                }
        }
        return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
        kprobe_opcode_t *addr = p->addr;
        if (p->symbol_name) {
                if (addr)
                        return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
        }

        if (!addr)
                return NULL;
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

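/*
 * Usage sketch: instead of a raw address, a user may fill in symbol_name
 * and offset and let kprobe_addr() resolve them. The symbol and offset
 * below are illustrative only:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0x10,
 *	};
 *
 * Setting both .addr and .symbol_name is ambiguous and is rejected
 * (kprobe_addr() returns NULL).
 */
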
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p))
                return NULL;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid;
                return NULL;
        }
valid:
        return old_p;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *old_p;

        mutex_lock(&kprobe_mutex);
        old_p = __get_valid_kprobe(p);
        if (old_p)
                ret = -EINVAL;
        mutex_unlock(&kprobe_mutex);
        return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
        kprobe_opcode_t *addr;

        addr = kprobe_addr(p);
        if (!addr)
                return -EINVAL;
        p->addr = addr;

        ret = check_kprobe_rereg(p);
        if (ret)
                return ret;

        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr)) {
                preempt_enable();
                return -EINVAL;
        }

        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;

        /*
         * Check if we are probing a module.
         */
        probed_mod = __module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                /*
                 * We must hold a refcount of the probed module while updating
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod))) {
                        preempt_enable();
                        return -EINVAL;
                }
                /*
                 * If the module freed .init.text, we can't insert
                 * kprobes in there.
                 */
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
                        preempt_enable();
                        return -EINVAL;
                }
        }
        preempt_enable();

        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        mutex_lock(&text_mutex);
        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out_unlock_text;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (!kprobes_all_disarmed && !kprobe_disabled(p))
                arch_arm_kprobe(p);

out_unlock_text:
        mutex_unlock(&text_mutex);
out:
        mutex_unlock(&kprobe_mutex);

        if (probed_mod)
                module_put(probed_mod);

        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

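/*
 * Usage sketch, a minimal module built around register_kprobe() (the
 * probed symbol is illustrative; error handling is trimmed for brevity):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: addr = %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init kprobe_example_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit kprobe_example_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *	module_init(kprobe_example_init);
 *	module_exit(kprobe_example_exit);
 */
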
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = __get_valid_kprobe(p);
        if (old_p == NULL)
                return -EINVAL;

        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             list_is_singular(&old_p->list))) {
                /*
                 * This is the only probe on the hash list. Disarm it only
                 * if kprobes are enabled and it is not gone - otherwise,
                 * the breakpoint would already have been removed. We save
                 * on flushing the icache.
                 */
                if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
                        disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler && !kprobe_gone(p))
                        old_p->break_handler = NULL;
                if (p->post_handler && !kprobe_gone(p)) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
                        old_p->post_handler = NULL;
                }
noclean:
                list_del_rcu(&p->list);
                if (!kprobe_disabled(old_p)) {
                        try_to_disable_aggr_kprobe(old_p);
                        if (!kprobes_all_disarmed && kprobe_disabled(old_p))
                                disarm_kprobe(old_p);
                }
        }
        return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
        struct kprobe *old_p;

        if (list_empty(&p->list))
                arch_remove_kprobe(p);
        else if (list_is_singular(&p->list)) {
                /* "p" is the last child of an aggr_kprobe */
                old_p = list_entry(p->list.next, struct kprobe, list);
                list_del(&p->list);
                arch_remove_kprobe(old_p);
                kfree(old_p);
        }
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
        int i, ret = 0;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kprobe(kps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kprobes(kps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
        unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(kps[i]) < 0)
                        kps[i]->addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++)
                if (kps[i]->addr)
                        __unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
        struct jprobe *jp;
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                unsigned long addr;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);

                if (!kernel_text_address(addr))
                        ret = -EINVAL;
                else {
                        /* TODO: verify that the probepoint is a function entry point */
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = register_kprobe(&jp->kp);
                }
                if (ret < 0) {
                        if (i > 0)
                                unregister_jprobes(jps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
        return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

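/*
 * Usage sketch: a jprobe's entry function must mirror the probed
 * function's signature and must end in jprobe_return(). The probed
 * symbol and its signature below are illustrative:
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry			= jdo_fork,
 *		.kp.symbol_name		= "do_fork",
 *	};
 *
 * register_jprobe(&my_jprobe) then arms it; unregister_jprobe() tears
 * it down.
 */
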
void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
                        jps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (jps[i]->kp.addr)
                        __unregister_kprobe_bottom(&jps[i]->kp);
        }
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a probe
 * hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                ri = hlist_entry(rp->free_instances.first,
                                struct kretprobe_instance, hlist);
                hlist_del(&ri->hlist);
                spin_unlock_irqrestore(&rp->lock, flags);

                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler && rp->entry_handler(ri, regs))
                        return 0;

                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                INIT_HLIST_NODE(&ri->hlist);
                kretprobe_table_lock(hash, &flags);
                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
                kretprobe_table_unlock(hash, &flags);
        } else {
                rp->nmissed++;
                spin_unlock_irqrestore(&rp->lock, flags);
        }
        return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr;

        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (!addr)
                        return -EINVAL;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * num_possible_cpus());
#else
                rp->maxactive = num_possible_cpus();
#endif
        }
        spin_lock_init(&rp->lock);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
                               rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->hlist);
                hlist_add_head(&inst->hlist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        ret = register_kprobe(&rp->kp);
        if (ret != 0)
                free_rp_inst(rp);
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

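/*
 * Usage sketch: a return handler reads the return value out of the saved
 * registers. The probed symbol is illustrative:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		unsigned long retval = regs_return_value(regs);
 *
 *		printk(KERN_INFO "%s returned %lu\n",
 *		       ri->rp->kp.symbol_name, retval);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler		= ret_handler,
 *		.kp.symbol_name		= "do_fork",
 *		.maxactive		= 20,
 *	};
 *
 * register_kretprobe(&my_kretprobe) arms it; afterwards, rp->nmissed
 * counts the hits that found all maxactive instances in flight.
 */
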
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kretprobe(rps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kretprobes(rps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
                        rps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (rps[i]->kp.addr) {
                        __unregister_kprobe_bottom(&rps[i]->kp);
                        cleanup_rp_inst(rps[i]);
                }
        }
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        p->flags |= KPROBE_FLAG_GONE;
        if (p->pre_handler == aggr_pre_handler) {
                /*
                 * If this is an aggr_kprobe, we have to list all the
                 * chained probes and mark them GONE.
                 */
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
                p->break_handler = NULL;
        }
        /*
         * Here, we can remove insn_slot safely, because no thread calls
         * the original probed function (which will be freed soon) any more.
         */
        arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
        printk(KERN_WARNING "Dumping kprobe:\n");
        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
               kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
                                             unsigned long val, void *data)
{
        struct module *mod = data;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
        int checkcore = (val == MODULE_STATE_GOING);

        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
                return NOTIFY_DONE;

        /*
         * When MODULE_STATE_GOING is notified, both the module's .text and
         * .init.text sections will be freed. When MODULE_STATE_LIVE is
         * notified, only the .init.text section is freed. We need to
         * disable the kprobes that have been inserted in those sections.
         */
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
                                /*
                                 * The vaddr this probe is installed at will
                                 * soon be vfreed but not synced to disk.
                                 * Hence, disarming the breakpoint isn't needed.
                                 */
                                kill_kprobe(p);
                        }
        }
        mutex_unlock(&kprobe_mutex);
        return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
        .notifier_call = kprobes_module_callback,
        .priority = 0
};

static int __init init_kprobes(void)
{
        int i, err = 0;
        unsigned long offset = 0, size = 0;
        char *modname, namebuf[128];
        const char *symbol_name;
        void *addr;
        struct kprobe_blackpoint *kb;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
                spin_lock_init(&(kretprobe_table_locks[i].lock));
        }

        /*
         * Lookup and populate the kprobe_blacklist.
         *
         * Unlike the kretprobe blacklist, we'll need to determine
         * the range of addresses that belong to the said functions,
         * since a kprobe need not necessarily be at the beginning
         * of a function.
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                kprobe_lookup_name(kb->name, addr);
                if (!addr)
                        continue;

                kb->start_addr = (unsigned long)addr;
                symbol_name = kallsyms_lookup(kb->start_addr,
                                &size, &offset, &modname, namebuf);
                if (!symbol_name)
                        kb->range = 0;
                else
                        kb->range = size;
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk(KERN_WARNING "kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);
        if (!err)
                err = register_module_notifier(&kprobe_module_nb);

        kprobes_initialized = (err == 0);

        if (!err)
                init_test_probes();
        return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";
        if (sym)
                seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
                        p->addr, kprobe_type, sym, offset,
                        (modname ? modname : " "),
                        (kprobe_gone(p) ? "[GONE]" : ""),
                        ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                         "[DISABLED]" : ""));
        else
                seq_printf(pi, "%p  %s  %p %s%s\n",
                        p->addr, kprobe_type, p->addr,
                        (kprobe_gone(p) ? "[GONE]" : ""),
                        ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                         "[DISABLED]" : ""));
}

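/*
 * Example of the resulting /sys/kernel/debug/kprobes/list output
 * (addresses and symbols are illustrative): address, type (k = kprobe,
 * r = kretprobe, j = jprobe), symbol+offset, optional module name, and
 * state tags:
 *
 *	c0123456  k  do_fork+0x0
 *	c01234a0  r  do_exit+0x0    [DISABLED]
 */
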
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                        &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static const struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        /* If the probe is already disabled (or gone), just return */
        if (kprobe_disabled(kp))
                goto out;

        kp->flags |= KPROBE_FLAG_DISABLED;
        if (p != kp)
                /* When kp != p, p is always enabled. */
                try_to_disable_aggr_kprobe(p);

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                disarm_kprobe(p);
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        if (kprobe_gone(kp)) {
                /* This kprobe has gone; we can't enable it. */
                ret = -EINVAL;
                goto out;
        }

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                arm_kprobe(p);

        p->flags &= ~KPROBE_FLAG_DISABLED;
        if (p != kp)
                kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

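/*
 * Usage sketch: a registered probe can be switched off and back on
 * without unregistering it, keeping its slot in the hash table:
 *
 *	disable_kprobe(&kp);	(breakpoint removed, kp stays registered)
 *	...
 *	enable_kprobe(&kp);	(breakpoint re-armed)
 */
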
static void __kprobes arm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are armed, just return */
        if (!kprobes_all_disarmed)
                goto already_enabled;

        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                arch_arm_kprobe(p);
        }
        mutex_unlock(&text_mutex);

        kprobes_all_disarmed = false;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
}

static void __kprobes disarm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disarmed, just return */
        if (kprobes_all_disarmed)
                goto already_disabled;

        kprobes_all_disarmed = true;
        printk(KERN_INFO "Kprobes globally disabled\n");
        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
                                arch_disarm_kprobe(p);
                }
        }

        mutex_unlock(&text_mutex);
        mutex_unlock(&kprobe_mutex);
        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
               char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (!kprobes_all_disarmed)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                arm_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disarm_all_kprobes();
                break;
        }

        return count;
}

static const struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
};

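/*
 * Usage sketch, from userspace (assumes debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);
 *
 *	write(fd, "0", 1);	(disarm all kprobes)
 *	write(fd, "1", 1);	(re-arm them)
 *	close(fd);
 */
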
static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                        &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);