kernel/debug/debug_core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel Debug Core
4  *
5  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
6  *
7  * Copyright (C) 2000-2001 VERITAS Software Corporation.
8  * Copyright (C) 2002-2004 Timesys Corporation
9  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
10  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
11  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
12  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
13  * Copyright (C) 2005-2009 Wind River Systems, Inc.
14  * Copyright (C) 2007 MontaVista Software, Inc.
15  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
16  *
17  * Contributors at various stages not listed above:
18  *  Jason Wessel ( jason.wessel@windriver.com )
19  *  George Anzinger <george@mvista.com>
20  *  Anurekh Saxena (anurekh.saxena@timesys.com)
21  *  Lake Stevens Instrument Division (Glenn Engel)
22  *  Jim Kingdon, Cygnus Support.
23  *
24  * Original KGDB stub: David Grothe <dave@gcom.com>,
25  * Tigran Aivazian <tigran@sco.com>
26  */
27
28 #define pr_fmt(fmt) "KGDB: " fmt
29
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/serial_core.h>
33 #include <linux/interrupt.h>
34 #include <linux/spinlock.h>
35 #include <linux/console.h>
36 #include <linux/threads.h>
37 #include <linux/uaccess.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/ptrace.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <linux/sysrq.h>
45 #include <linux/reboot.h>
46 #include <linux/init.h>
47 #include <linux/kgdb.h>
48 #include <linux/kdb.h>
49 #include <linux/nmi.h>
50 #include <linux/pid.h>
51 #include <linux/smp.h>
52 #include <linux/mm.h>
53 #include <linux/rcupdate.h>
54 #include <linux/irq.h>
55 #include <linux/security.h>
56
57 #include <asm/cacheflush.h>
58 #include <asm/byteorder.h>
59 #include <linux/atomic.h>
60
61 #include "debug_core.h"
62
63 static int kgdb_break_asap;
64
65 struct debuggerinfo_struct kgdb_info[NR_CPUS];
66
67 /* kgdb_connected - Is a host GDB connected to us? */
68 int                             kgdb_connected;
69 EXPORT_SYMBOL_GPL(kgdb_connected);
70
71 /* All the KGDB handlers are installed */
72 int                     kgdb_io_module_registered;
73
74 /* Guard for recursive entry */
75 static int                      exception_level;
76
77 struct kgdb_io          *dbg_io_ops;
78 static DEFINE_SPINLOCK(kgdb_registration_lock);
79
80 /* Action for the reboot notifier; a global so kdb can change it */
81 static int kgdbreboot;
82 /* kgdb console driver is loaded */
83 static int kgdb_con_registered;
84 /* determine if kgdb console output should be used */
85 static int kgdb_use_con;
86 /* Flag for alternate operations for early debugging */
87 bool dbg_is_early = true;
88 /* Next cpu to become the master debug core */
89 int dbg_switch_cpu;
90
91 /* Use kdb or gdbserver mode */
92 int dbg_kdb_mode = 1;
93
94 module_param(kgdb_use_con, int, 0644);
95 module_param(kgdbreboot, int, 0644);
96
97 /*
98  * Holds information about breakpoints in the kernel. These breakpoints are
99  * added and removed by gdb.
100  */
101 static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
102         [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
103 };
104
105 /*
106  * The CPU# of the active CPU, or -1 if none:
107  */
108 atomic_t                        kgdb_active = ATOMIC_INIT(-1);
109 EXPORT_SYMBOL_GPL(kgdb_active);
110 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
111 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
112
113 /*
114  * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
115  * bootup code (which might not have percpu set up yet):
116  */
117 static atomic_t                 masters_in_kgdb;
118 static atomic_t                 slaves_in_kgdb;
119 atomic_t                        kgdb_setting_breakpoint;
120
121 struct task_struct              *kgdb_usethread;
122 struct task_struct              *kgdb_contthread;
123
124 int                             kgdb_single_step;
125 static pid_t                    kgdb_sstep_pid;
126
127 /* To keep track of the CPU which is doing the single stepping */
128 atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
129
130 /*
131  * If you are debugging a problem where roundup (the collection of
132  * all other CPUs) is itself the problem [this should be extremely rare],
133  * then use the nokgdbroundup option to avoid it. In that case
134  * the other CPUs might interfere with your debugging context, so
135  * use this with care:
136  */
137 static int kgdb_do_roundup = 1;
138
139 static int __init opt_nokgdbroundup(char *str)
140 {
141         kgdb_do_roundup = 0;
142
143         return 0;
144 }
145
146 early_param("nokgdbroundup", opt_nokgdbroundup);
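/*
 * Example (illustrative only): roundup is disabled by appending the option
 * to the kernel command line, alongside whatever kgdb I/O driver is in use:
 *
 *	nokgdbroundup
 */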
147
148 /*
149  * Finally, some KGDB code :-)
150  */
151
152 /*
153  * Weak aliases for breakpoint management; they
154  * can be overridden by architectures when needed:
155  */
156 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
157 {
158         int err;
159
160         err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
161                                 BREAK_INSTR_SIZE);
162         if (err)
163                 return err;
164         err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
165                                  arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
166         return err;
167 }
168 NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);
169
170 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
171 {
172         return copy_to_kernel_nofault((char *)bpt->bpt_addr,
173                                   (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
174 }
175 NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);
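/*
 * Sketch of an architecture override (illustrative only, not taken from a
 * real port): an arch that must go through its own text-patching helper
 * rather than a plain memory copy could provide something like the
 * following, where my_arch_patch_text() is a hypothetical helper:
 *
 *	int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 *	{
 *		int err;
 *
 *		err = copy_from_kernel_nofault(bpt->saved_instr,
 *					       (char *)bpt->bpt_addr,
 *					       BREAK_INSTR_SIZE);
 *		if (err)
 *			return err;
 *
 *		return my_arch_patch_text((void *)bpt->bpt_addr,
 *					  arch_kgdb_ops.gdb_bpt_instr,
 *					  BREAK_INSTR_SIZE);
 *	}
 */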
176
177 int __weak kgdb_validate_break_address(unsigned long addr)
178 {
179         struct kgdb_bkpt tmp;
180         int err;
181
182         if (kgdb_within_blocklist(addr))
183                 return -EINVAL;
184
185         /* Validate setting the breakpoint and then removing it.  If the
186          * remove fails, the kernel needs to emit a loud complaint because
187          * we are in deep trouble, unable to put things back the way we
188          * found them.
189          */
190         tmp.bpt_addr = addr;
191         err = kgdb_arch_set_breakpoint(&tmp);
192         if (err)
193                 return err;
194         err = kgdb_arch_remove_breakpoint(&tmp);
195         if (err)
196                 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
197                        addr);
198         return err;
199 }
200
201 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
202 {
203         return instruction_pointer(regs);
204 }
205 NOKPROBE_SYMBOL(kgdb_arch_pc);
206
207 int __weak kgdb_arch_init(void)
208 {
209         return 0;
210 }
211
212 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
213 {
214         return 0;
215 }
216 NOKPROBE_SYMBOL(kgdb_skipexception);
217
218 #ifdef CONFIG_SMP
219
220 /*
221  * Default (weak) implementation for kgdb_roundup_cpus
222  */
223
224 void __weak kgdb_call_nmi_hook(void *ignored)
225 {
226         /*
227          * NOTE: get_irq_regs() is supposed to get the registers from
228          * before the IPI interrupt happened and so is supposed to
229          * show where the processor was.  In some situations it's
230          * possible we might be called without an IPI, so it might be
231          * safer to figure out how to make kgdb_breakpoint() work
232          * properly here.
233          */
234         kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
235 }
236 NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
237
238 static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
239         CSD_INIT(kgdb_call_nmi_hook, NULL);
240
241 void __weak kgdb_roundup_cpus(void)
242 {
243         call_single_data_t *csd;
244         int this_cpu = raw_smp_processor_id();
245         int cpu;
246         int ret;
247
248         for_each_online_cpu(cpu) {
249                 /* No need to roundup ourselves */
250                 if (cpu == this_cpu)
251                         continue;
252
253                 csd = &per_cpu(kgdb_roundup_csd, cpu);
254
255                 /*
256                  * If it didn't round up last time, don't try again
257                  * since smp_call_function_single_async() will block.
258                  *
259                  * If rounding_up is false then we know that the
260                  * previous call must have at least started and that
261                  * means smp_call_function_single_async() won't block.
262                  */
263                 if (kgdb_info[cpu].rounding_up)
264                         continue;
265                 kgdb_info[cpu].rounding_up = true;
266
267                 ret = smp_call_function_single_async(cpu, csd);
268                 if (ret)
269                         kgdb_info[cpu].rounding_up = false;
270         }
271 }
272 NOKPROBE_SYMBOL(kgdb_roundup_cpus);
273
274 #endif
275
276 /*
277  * Some architectures need cache flushes when we set/clear a
278  * breakpoint:
279  */
280 static void kgdb_flush_swbreak_addr(unsigned long addr)
281 {
282         if (!CACHE_FLUSH_IS_SAFE)
283                 return;
284
285         /* Force flush instruction cache if it was outside the mm */
286         flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
287 }
288 NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);
289
290 /*
291  * SW breakpoint management:
292  */
293 int dbg_activate_sw_breakpoints(void)
294 {
295         int error;
296         int ret = 0;
297         int i;
298
299         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
300                 if (kgdb_break[i].state != BP_SET)
301                         continue;
302
303                 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
304                 if (error) {
305                         ret = error;
306                         pr_info("BP install failed: %lx\n",
307                                 kgdb_break[i].bpt_addr);
308                         continue;
309                 }
310
311                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
312                 kgdb_break[i].state = BP_ACTIVE;
313         }
314         return ret;
315 }
316 NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
317
318 int dbg_set_sw_break(unsigned long addr)
319 {
320         int err = kgdb_validate_break_address(addr);
321         int breakno = -1;
322         int i;
323
324         if (err)
325                 return err;
326
327         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
328                 if ((kgdb_break[i].state == BP_SET) &&
329                                         (kgdb_break[i].bpt_addr == addr))
330                         return -EEXIST;
331         }
332         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
333                 if (kgdb_break[i].state == BP_REMOVED &&
334                                         kgdb_break[i].bpt_addr == addr) {
335                         breakno = i;
336                         break;
337                 }
338         }
339
340         if (breakno == -1) {
341                 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
342                         if (kgdb_break[i].state == BP_UNDEFINED) {
343                                 breakno = i;
344                                 break;
345                         }
346                 }
347         }
348
349         if (breakno == -1)
350                 return -E2BIG;
351
352         kgdb_break[breakno].state = BP_SET;
353         kgdb_break[breakno].type = BP_BREAKPOINT;
354         kgdb_break[breakno].bpt_addr = addr;
355
356         return 0;
357 }
358
359 int dbg_deactivate_sw_breakpoints(void)
360 {
361         int error;
362         int ret = 0;
363         int i;
364
365         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
366                 if (kgdb_break[i].state != BP_ACTIVE)
367                         continue;
368                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
369                 if (error) {
370                         pr_info("BP remove failed: %lx\n",
371                                 kgdb_break[i].bpt_addr);
372                         ret = error;
373                 }
374
375                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
376                 kgdb_break[i].state = BP_SET;
377         }
378         return ret;
379 }
380 NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);
381
382 int dbg_remove_sw_break(unsigned long addr)
383 {
384         int i;
385
386         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
387                 if ((kgdb_break[i].state == BP_SET) &&
388                                 (kgdb_break[i].bpt_addr == addr)) {
389                         kgdb_break[i].state = BP_REMOVED;
390                         return 0;
391                 }
392         }
393         return -ENOENT;
394 }
395
396 int kgdb_isremovedbreak(unsigned long addr)
397 {
398         int i;
399
400         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
401                 if ((kgdb_break[i].state == BP_REMOVED) &&
402                                         (kgdb_break[i].bpt_addr == addr))
403                         return 1;
404         }
405         return 0;
406 }
407
408 int kgdb_has_hit_break(unsigned long addr)
409 {
410         int i;
411
412         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
413                 if (kgdb_break[i].state == BP_ACTIVE &&
414                     kgdb_break[i].bpt_addr == addr)
415                         return 1;
416         }
417         return 0;
418 }
419
420 int dbg_remove_all_break(void)
421 {
422         int error;
423         int i;
424
425         /* Clear memory breakpoints. */
426         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
427                 if (kgdb_break[i].state != BP_ACTIVE)
428                         goto setundefined;
429                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
430                 if (error)
431                         pr_err("breakpoint remove failed: %lx\n",
432                                kgdb_break[i].bpt_addr);
433 setundefined:
434                 kgdb_break[i].state = BP_UNDEFINED;
435         }
436
437         /* Clear hardware breakpoints. */
438         if (arch_kgdb_ops.remove_all_hw_break)
439                 arch_kgdb_ops.remove_all_hw_break();
440
441         return 0;
442 }
443
444 void kgdb_free_init_mem(void)
445 {
446         int i;
447
448         /* Clear init memory breakpoints. */
449         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
450                 if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
451                         kgdb_break[i].state = BP_UNDEFINED;
452         }
453 }
454
455 #ifdef CONFIG_KGDB_KDB
456 void kdb_dump_stack_on_cpu(int cpu)
457 {
458         if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
459                 dump_stack();
460                 return;
461         }
462
463         if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
464                 kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
465                            cpu);
466                 return;
467         }
468
469         /*
470          * In general, architectures don't support dumping the stack of a
471          * "running" process that's not the current one.  From the point of
472          * view of Linux, kernel processes that are looping in the kgdb
473          * slave loop are still "running".  There's also no API (that actually
474          * works across all architectures) that can do a stack crawl based
475          * on registers passed as a parameter.
476          *
477          * Solve this conundrum by asking slave CPUs to do the backtrace
478          * themselves.
479          */
480         kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
481         while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
482                 cpu_relax();
483 }
484 #endif
485
486 /*
487  * Return true if there is a valid kgdb I/O module.  Also, if no
488  * debugger is attached, a message can be printed to the console about
489  * waiting for the debugger to attach.
490  *
491  * The print_wait argument should only be true when called from inside
492  * the core kgdb_handle_exception, because it will wait for the
493  * debugger to attach.
494  */
495 static int kgdb_io_ready(int print_wait)
496 {
497         if (!dbg_io_ops)
498                 return 0;
499         if (kgdb_connected)
500                 return 1;
501         if (atomic_read(&kgdb_setting_breakpoint))
502                 return 1;
503         if (print_wait) {
504 #ifdef CONFIG_KGDB_KDB
505                 if (!dbg_kdb_mode)
506                         pr_crit("waiting... or $3#33 for KDB\n");
507 #else
508                 pr_crit("Waiting for remote debugger\n");
509 #endif
510         }
511         return 1;
512 }
513 NOKPROBE_SYMBOL(kgdb_io_ready);
514
515 static int kgdb_reenter_check(struct kgdb_state *ks)
516 {
517         unsigned long addr;
518
519         if (atomic_read(&kgdb_active) != raw_smp_processor_id())
520                 return 0;
521
522         /* Panic on recursive debugger calls: */
523         exception_level++;
524         addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
525         dbg_deactivate_sw_breakpoints();
526
527         /*
528          * If the breakpoint was removed successfully at the place the
529          * exception occurred, try to recover and print a warning to the end
530          * user because the user planted a breakpoint in a place that
531          * KGDB needs in order to function.
532          */
533         if (dbg_remove_sw_break(addr) == 0) {
534                 exception_level = 0;
535                 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
536                 dbg_activate_sw_breakpoints();
537                 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
538                 WARN_ON_ONCE(1);
539
540                 return 1;
541         }
542         dbg_remove_all_break();
543         kgdb_skipexception(ks->ex_vector, ks->linux_regs);
544
545         if (exception_level > 1) {
546                 dump_stack();
547                 kgdb_io_module_registered = false;
548                 panic("Recursive entry to debugger");
549         }
550
551         pr_crit("re-enter exception: ALL breakpoints killed\n");
552 #ifdef CONFIG_KGDB_KDB
553         /* Allow kdb to debug itself one level */
554         return 0;
555 #endif
556         dump_stack();
557         panic("Recursive entry to debugger");
558
559         return 1;
560 }
561 NOKPROBE_SYMBOL(kgdb_reenter_check);
562
563 static void dbg_touch_watchdogs(void)
564 {
565         touch_softlockup_watchdog_sync();
566         clocksource_touch_watchdog();
567         rcu_cpu_stall_reset();
568 }
569 NOKPROBE_SYMBOL(dbg_touch_watchdogs);
570
571 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
572                 int exception_state)
573 {
574         unsigned long flags;
575         int sstep_tries = 100;
576         int error;
577         int cpu;
578         int trace_on = 0;
579         int online_cpus = num_online_cpus();
580         u64 time_left;
581
582         kgdb_info[ks->cpu].enter_kgdb++;
583         kgdb_info[ks->cpu].exception_state |= exception_state;
584
585         if (exception_state == DCPU_WANT_MASTER)
586                 atomic_inc(&masters_in_kgdb);
587         else
588                 atomic_inc(&slaves_in_kgdb);
589
590         if (arch_kgdb_ops.disable_hw_break)
591                 arch_kgdb_ops.disable_hw_break(regs);
592
593 acquirelock:
594         rcu_read_lock();
595         /*
596          * Interrupts will be restored by the 'trap return' code, except when
597          * single stepping.
598          */
599         local_irq_save(flags);
600
601         cpu = ks->cpu;
602         kgdb_info[cpu].debuggerinfo = regs;
603         kgdb_info[cpu].task = current;
604         kgdb_info[cpu].ret_state = 0;
605         kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
606
607         /* Make sure the above info reaches the primary CPU */
608         smp_mb();
609
610         if (exception_level == 1) {
611                 if (raw_spin_trylock(&dbg_master_lock))
612                         atomic_xchg(&kgdb_active, cpu);
613                 goto cpu_master_loop;
614         }
615
616         /*
617          * The CPU will loop if it is a slave, or it will request to become
618          * the kgdb master cpu and acquire the kgdb_active lock:
619          */
620         while (1) {
621 cpu_loop:
622                 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
623                         kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
624                         goto cpu_master_loop;
625                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
626                         if (raw_spin_trylock(&dbg_master_lock)) {
627                                 atomic_xchg(&kgdb_active, cpu);
628                                 break;
629                         }
630                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
631                         dump_stack();
632                         kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
633                 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
634                         if (!raw_spin_is_locked(&dbg_slave_lock))
635                                 goto return_normal;
636                 } else {
637 return_normal:
638                         /* Return to normal operation by executing any
639                          * hw breakpoint fixup.
640                          */
641                         if (arch_kgdb_ops.correct_hw_break)
642                                 arch_kgdb_ops.correct_hw_break();
643                         if (trace_on)
644                                 tracing_on();
645                         kgdb_info[cpu].debuggerinfo = NULL;
646                         kgdb_info[cpu].task = NULL;
647                         kgdb_info[cpu].exception_state &=
648                                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
649                         kgdb_info[cpu].enter_kgdb--;
650                         smp_mb__before_atomic();
651                         atomic_dec(&slaves_in_kgdb);
652                         dbg_touch_watchdogs();
653                         local_irq_restore(flags);
654                         rcu_read_unlock();
655                         return 0;
656                 }
657                 cpu_relax();
658         }
659
660         /*
661          * For single stepping, try to only enter on the processor
662          * that was single stepping.  To guard against a deadlock, the
663          * kernel will only try sstep_tries times before
664          * giving up and continuing on.
665          */
666         if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
667             (kgdb_info[cpu].task &&
668              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
669                 atomic_set(&kgdb_active, -1);
670                 raw_spin_unlock(&dbg_master_lock);
671                 dbg_touch_watchdogs();
672                 local_irq_restore(flags);
673                 rcu_read_unlock();
674
675                 goto acquirelock;
676         }
677
678         if (!kgdb_io_ready(1)) {
679                 kgdb_info[cpu].ret_state = 1;
680                 goto kgdb_restore; /* No I/O connection, resume the system */
681         }
682
683         /*
684          * Don't enter if we have hit a removed breakpoint.
685          */
686         if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
687                 goto kgdb_restore;
688
689         atomic_inc(&ignore_console_lock_warning);
690
691         /* Call the I/O driver's pre_exception routine */
692         if (dbg_io_ops->pre_exception)
693                 dbg_io_ops->pre_exception();
694
695         /*
696          * Get the passive CPU lock which will hold all the non-primary
697          * CPUs in a spin state while the debugger is active
698          */
699         if (!kgdb_single_step)
700                 raw_spin_lock(&dbg_slave_lock);
701
702 #ifdef CONFIG_SMP
703         /* If send_ready set, slaves are already waiting */
704         if (ks->send_ready)
705                 atomic_set(ks->send_ready, 1);
706
707         /* Signal the other CPUs to enter kgdb_wait() */
708         else if ((!kgdb_single_step) && kgdb_do_roundup)
709                 kgdb_roundup_cpus();
710 #endif
711
712         /*
713          * Wait for the other CPUs to be notified and be waiting for us:
714          */
715         time_left = MSEC_PER_SEC;
716         while (kgdb_do_roundup && --time_left &&
717                (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
718                    online_cpus)
719                 udelay(1000);
720         if (!time_left)
721                 pr_crit("Timed out waiting for secondary CPUs.\n");
722
723         /*
724          * At this point the primary processor is completely
725          * in the debugger and all secondary CPUs are quiescent
726          */
727         dbg_deactivate_sw_breakpoints();
728         kgdb_single_step = 0;
729         kgdb_contthread = current;
730         exception_level = 0;
731         trace_on = tracing_is_on();
732         if (trace_on)
733                 tracing_off();
734
735         while (1) {
736 cpu_master_loop:
737                 if (dbg_kdb_mode) {
738                         kgdb_connected = 1;
739                         error = kdb_stub(ks);
740                         if (error == -1)
741                                 continue;
742                         kgdb_connected = 0;
743                 } else {
744                         /*
745                          * This is a brutal way to interfere with the debugger
746                          * and prevent gdb from being used to poke at kernel memory.
747                          * This could cause trouble if lockdown is applied when
748                          * there is already an active gdb session. For now the
749                          * answer is simply "don't do that". Typically lockdown
750                          * *will* be applied before the debug core gets started
751                          * so only developers using kgdb for fairly advanced
752                          * early kernel debug can be bitten by this. Hopefully
753                          * they are sophisticated enough to take care of
754                          * themselves, especially with help from the lockdown
755                          * message printed on the console!
756                          */
757                         if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
758                                 if (IS_ENABLED(CONFIG_KGDB_KDB)) {
759                                         /* Switch back to kdb if possible... */
760                                         dbg_kdb_mode = 1;
761                                         continue;
762                                 } else {
763                                         /* ... otherwise just bail */
764                                         break;
765                                 }
766                         }
767                         error = gdb_serial_stub(ks);
768                 }
769
770                 if (error == DBG_PASS_EVENT) {
771                         dbg_kdb_mode = !dbg_kdb_mode;
772                 } else if (error == DBG_SWITCH_CPU_EVENT) {
773                         kgdb_info[dbg_switch_cpu].exception_state |=
774                                 DCPU_NEXT_MASTER;
775                         goto cpu_loop;
776                 } else {
777                         kgdb_info[cpu].ret_state = error;
778                         break;
779                 }
780         }
781
782         dbg_activate_sw_breakpoints();
783
784         /* Call the I/O driver's post_exception routine */
785         if (dbg_io_ops->post_exception)
786                 dbg_io_ops->post_exception();
787
788         atomic_dec(&ignore_console_lock_warning);
789
790         if (!kgdb_single_step) {
791                 raw_spin_unlock(&dbg_slave_lock);
792                 /* Wait till all the CPUs have quit from the debugger. */
793                 while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
794                         cpu_relax();
795         }
796
797 kgdb_restore:
798         if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
799                 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
800                 if (kgdb_info[sstep_cpu].task)
801                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
802                 else
803                         kgdb_sstep_pid = 0;
804         }
805         if (arch_kgdb_ops.correct_hw_break)
806                 arch_kgdb_ops.correct_hw_break();
807         if (trace_on)
808                 tracing_on();
809
810         kgdb_info[cpu].debuggerinfo = NULL;
811         kgdb_info[cpu].task = NULL;
812         kgdb_info[cpu].exception_state &=
813                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
814         kgdb_info[cpu].enter_kgdb--;
815         smp_mb__before_atomic();
816         atomic_dec(&masters_in_kgdb);
817         /* Free kgdb_active */
818         atomic_set(&kgdb_active, -1);
819         raw_spin_unlock(&dbg_master_lock);
820         dbg_touch_watchdogs();
821         local_irq_restore(flags);
822         rcu_read_unlock();
823
824         return kgdb_info[cpu].ret_state;
825 }
826 NOKPROBE_SYMBOL(kgdb_cpu_enter);
827
828 /*
829  * kgdb_handle_exception() - main entry point from a kernel exception
830  *
831  * Locking hierarchy:
832  *      interface locks, if any (begin_session)
833  *      kgdb lock (kgdb_active)
834  */
835 int
836 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
837 {
838         struct kgdb_state kgdb_var;
839         struct kgdb_state *ks = &kgdb_var;
840         int ret = 0;
841
842         if (arch_kgdb_ops.enable_nmi)
843                 arch_kgdb_ops.enable_nmi(0);
844         /*
845          * Avoid entering the debugger if we were triggered due to an oops
846          * but panic_timeout indicates the system should automatically
847          * reboot on panic. We don't want to get stuck waiting for input
848          * on such systems, especially if it's "just" an oops.
849          */
850         if (signo != SIGTRAP && panic_timeout)
851                 return 1;
852
853         memset(ks, 0, sizeof(struct kgdb_state));
854         ks->cpu                 = raw_smp_processor_id();
855         ks->ex_vector           = evector;
856         ks->signo               = signo;
857         ks->err_code            = ecode;
858         ks->linux_regs          = regs;
859
860         if (kgdb_reenter_check(ks))
861                 goto out; /* Ouch, double exception ! */
862         if (kgdb_info[ks->cpu].enter_kgdb != 0)
863                 goto out;
864
865         ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
866 out:
867         if (arch_kgdb_ops.enable_nmi)
868                 arch_kgdb_ops.enable_nmi(1);
869         return ret;
870 }
871 NOKPROBE_SYMBOL(kgdb_handle_exception);
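/*
 * Illustrative sketch only: an architecture's breakpoint trap handler
 * typically hands control to the debug core along these lines.  The
 * handler name and the argument values are placeholders, not taken from
 * any particular port:
 *
 *	static int my_arch_kgdb_brk_fn(struct pt_regs *regs)
 *	{
 *		if (user_mode(regs))
 *			return -EFAULT;
 *
 *		return kgdb_handle_exception(0, SIGTRAP, 0, regs);
 *	}
 */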
872
873 /*
874  * GDB places a breakpoint at this function to learn about dynamically loaded objects.
875  */
876 static int module_event(struct notifier_block *self, unsigned long val,
877         void *data)
878 {
879         return 0;
880 }
881
882 static struct notifier_block dbg_module_load_nb = {
883         .notifier_call  = module_event,
884 };
885
886 int kgdb_nmicallback(int cpu, void *regs)
887 {
888 #ifdef CONFIG_SMP
889         struct kgdb_state kgdb_var;
890         struct kgdb_state *ks = &kgdb_var;
891
892         kgdb_info[cpu].rounding_up = false;
893
894         memset(ks, 0, sizeof(struct kgdb_state));
895         ks->cpu                 = cpu;
896         ks->linux_regs          = regs;
897
898         if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
899                         raw_spin_is_locked(&dbg_master_lock)) {
900                 kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
901                 return 0;
902         }
903 #endif
904         return 1;
905 }
906 NOKPROBE_SYMBOL(kgdb_nmicallback);
907
908 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
909                                                         atomic_t *send_ready)
910 {
911 #ifdef CONFIG_SMP
912         if (!kgdb_io_ready(0) || !send_ready)
913                 return 1;
914
915         if (kgdb_info[cpu].enter_kgdb == 0) {
916                 struct kgdb_state kgdb_var;
917                 struct kgdb_state *ks = &kgdb_var;
918
919                 memset(ks, 0, sizeof(struct kgdb_state));
920                 ks->cpu                 = cpu;
921                 ks->ex_vector           = trapnr;
922                 ks->signo               = SIGTRAP;
923                 ks->err_code            = err_code;
924                 ks->linux_regs          = regs;
925                 ks->send_ready          = send_ready;
926                 kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
927                 return 0;
928         }
929 #endif
930         return 1;
931 }
932 NOKPROBE_SYMBOL(kgdb_nmicallin);
933
934 static void kgdb_console_write(struct console *co, const char *s,
935    unsigned count)
936 {
937         unsigned long flags;
938
939         /* If we're debugging, or KGDB has not connected, don't try
940          * to print. */
941         if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
942                 return;
943
944         local_irq_save(flags);
945         gdbstub_msg_write(s, count);
946         local_irq_restore(flags);
947 }
948
949 static struct console kgdbcons = {
950         .name           = "kgdb",
951         .write          = kgdb_console_write,
952         .flags          = CON_PRINTBUFFER | CON_ENABLED,
953         .index          = -1,
954 };
955
956 static int __init opt_kgdb_con(char *str)
957 {
958         kgdb_use_con = 1;
959
960         if (kgdb_io_module_registered && !kgdb_con_registered) {
961                 register_console(&kgdbcons);
962                 kgdb_con_registered = 1;
963         }
964
965         return 0;
966 }
967
968 early_param("kgdbcon", opt_kgdb_con);
969
970 #ifdef CONFIG_MAGIC_SYSRQ
971 static void sysrq_handle_dbg(int key)
972 {
973         if (!dbg_io_ops) {
974                 pr_crit("ERROR: No KGDB I/O module available\n");
975                 return;
976         }
977         if (!kgdb_connected) {
978 #ifdef CONFIG_KGDB_KDB
979                 if (!dbg_kdb_mode)
980                         pr_crit("KGDB or $3#33 for KDB\n");
981 #else
982                 pr_crit("Entering KGDB\n");
983 #endif
984         }
985
986         kgdb_breakpoint();
987 }
988
989 static const struct sysrq_key_op sysrq_dbg_op = {
990         .handler        = sysrq_handle_dbg,
991         .help_msg       = "debug(g)",
992         .action_msg     = "DEBUG",
993 };
994 #endif
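/*
 * With CONFIG_MAGIC_SYSRQ enabled, the debugger can also be entered from
 * user space, for example:
 *
 *	echo g > /proc/sysrq-trigger
 *
 * or via the SysRq-g key sequence on a console.
 */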
995
996 void kgdb_panic(const char *msg)
997 {
998         if (!kgdb_io_module_registered)
999                 return;
1000
1001         /*
1002          * We don't want to get stuck waiting for input from the user if
1003          * "panic_timeout" indicates the system should automatically
1004          * reboot on panic.
1005          */
1006         if (panic_timeout)
1007                 return;
1008
1009         if (dbg_kdb_mode)
1010                 kdb_printf("PANIC: %s\n", msg);
1011
1012         kgdb_breakpoint();
1013 }
1014
1015 static void kgdb_initial_breakpoint(void)
1016 {
1017         kgdb_break_asap = 0;
1018
1019         pr_crit("Waiting for connection from remote gdb...\n");
1020         kgdb_breakpoint();
1021 }
1022
1023 void __weak kgdb_arch_late(void)
1024 {
1025 }
1026
1027 void __init dbg_late_init(void)
1028 {
1029         dbg_is_early = false;
1030         if (kgdb_io_module_registered)
1031                 kgdb_arch_late();
1032         kdb_init(KDB_INIT_FULL);
1033
1034         if (kgdb_io_module_registered && kgdb_break_asap)
1035                 kgdb_initial_breakpoint();
1036 }
1037
1038 static int
1039 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
1040 {
1041         /*
1042          * Take the following action on reboot notify depending on value:
1043          *    1 == Enter debugger
1044          *    0 == [the default] detach debug client
1045          *   -1 == Do nothing... and use this until the board resets
1046          */
1047         switch (kgdbreboot) {
1048         case 1:
1049                 kgdb_breakpoint();
1050                 goto done;
1051         case -1:
1052                 goto done;
1053         }
1054         if (!dbg_kdb_mode)
1055                 gdbstub_exit(code);
1056 done:
1057         return NOTIFY_DONE;
1058 }
1059
1060 static struct notifier_block dbg_reboot_notifier = {
1061         .notifier_call          = dbg_notify_reboot,
1062         .next                   = NULL,
1063         .priority               = INT_MAX,
1064 };
1065
1066 static void kgdb_register_callbacks(void)
1067 {
1068         if (!kgdb_io_module_registered) {
1069                 kgdb_io_module_registered = 1;
1070                 kgdb_arch_init();
1071                 if (!dbg_is_early)
1072                         kgdb_arch_late();
1073                 register_module_notifier(&dbg_module_load_nb);
1074                 register_reboot_notifier(&dbg_reboot_notifier);
1075 #ifdef CONFIG_MAGIC_SYSRQ
1076                 register_sysrq_key('g', &sysrq_dbg_op);
1077 #endif
1078                 if (kgdb_use_con && !kgdb_con_registered) {
1079                         register_console(&kgdbcons);
1080                         kgdb_con_registered = 1;
1081                 }
1082         }
1083 }
1084
1085 static void kgdb_unregister_callbacks(void)
1086 {
1087         /*
1088          * When this routine is called KGDB should unregister from
1089          * handlers and clean up, making sure it is not handling any
1090          * break exceptions at the time.
1091          */
1092         if (kgdb_io_module_registered) {
1093                 kgdb_io_module_registered = 0;
1094                 unregister_reboot_notifier(&dbg_reboot_notifier);
1095                 unregister_module_notifier(&dbg_module_load_nb);
1096                 kgdb_arch_exit();
1097 #ifdef CONFIG_MAGIC_SYSRQ
1098                 unregister_sysrq_key('g', &sysrq_dbg_op);
1099 #endif
1100                 if (kgdb_con_registered) {
1101                         unregister_console(&kgdbcons);
1102                         kgdb_con_registered = 0;
1103                 }
1104         }
1105 }
1106
1107 /**
1108  *      kgdb_register_io_module - register KGDB IO module
1109  *      @new_dbg_io_ops: the io ops vector
1110  *
1111  *      Register it with the KGDB core.
1112  */
1113 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
1114 {
1115         struct kgdb_io *old_dbg_io_ops;
1116         int err;
1117
1118         spin_lock(&kgdb_registration_lock);
1119
1120         old_dbg_io_ops = dbg_io_ops;
1121         if (old_dbg_io_ops) {
1122                 if (!old_dbg_io_ops->deinit) {
1123                         spin_unlock(&kgdb_registration_lock);
1124
1125                         pr_err("KGDB I/O driver %s can't replace %s.\n",
1126                                 new_dbg_io_ops->name, old_dbg_io_ops->name);
1127                         return -EBUSY;
1128                 }
1129                 pr_info("Replacing I/O driver %s with %s\n",
1130                         old_dbg_io_ops->name, new_dbg_io_ops->name);
1131         }
1132
1133         if (new_dbg_io_ops->init) {
1134                 err = new_dbg_io_ops->init();
1135                 if (err) {
1136                         spin_unlock(&kgdb_registration_lock);
1137                         return err;
1138                 }
1139         }
1140
1141         dbg_io_ops = new_dbg_io_ops;
1142
1143         spin_unlock(&kgdb_registration_lock);
1144
1145         if (old_dbg_io_ops) {
1146                 old_dbg_io_ops->deinit();
1147                 return 0;
1148         }
1149
1150         pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1151
1152         /* Arm KGDB now. */
1153         kgdb_register_callbacks();
1154
1155         if (kgdb_break_asap &&
1156             (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
1157                 kgdb_initial_breakpoint();
1158
1159         return 0;
1160 }
1161 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
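/*
 * Minimal registration sketch (illustrative only): a polled I/O driver
 * fills in a struct kgdb_io and hands it to the core.  The helpers
 * my_uart_poll_get_char() and my_uart_poll_put_char() are hypothetical
 * stand-ins for a real polled UART:
 *
 *	static struct kgdb_io my_kgdb_io_ops = {
 *		.name		= "my_kgdb_io",
 *		.read_char	= my_uart_poll_get_char,
 *		.write_char	= my_uart_poll_put_char,
 *	};
 *
 *	err = kgdb_register_io_module(&my_kgdb_io_ops);
 */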
1162
1163 /**
1164  *      kgdb_unregister_io_module - unregister KGDB IO module
1165  *      @old_dbg_io_ops: the io ops vector
1166  *
1167  *      Unregister it with the KGDB core.
1168  */
1169 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1170 {
1171         BUG_ON(kgdb_connected);
1172
1173         /*
1174          * KGDB is no longer able to communicate out, so
1175          * unregister our callbacks and reset state.
1176          */
1177         kgdb_unregister_callbacks();
1178
1179         spin_lock(&kgdb_registration_lock);
1180
1181         WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1182         dbg_io_ops = NULL;
1183
1184         spin_unlock(&kgdb_registration_lock);
1185
1186         if (old_dbg_io_ops->deinit)
1187                 old_dbg_io_ops->deinit();
1188
1189         pr_info("Unregistered I/O driver %s, debugger disabled\n",
1190                 old_dbg_io_ops->name);
1191 }
1192 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1193
1194 int dbg_io_get_char(void)
1195 {
1196         int ret = dbg_io_ops->read_char();
1197         if (ret == NO_POLL_CHAR)
1198                 return -1;
1199         if (!dbg_kdb_mode)
1200                 return ret;
1201         if (ret == 127)
1202                 return 8;
1203         return ret;
1204 }
1205
1206 /**
1207  * kgdb_breakpoint - generate breakpoint exception
1208  *
1209  * This function will generate a breakpoint exception.  It is used at the
1210  * beginning of a program to sync up with a debugger and can be used
1211  * otherwise as a quick means to stop program execution and "break" into
1212  * the debugger.
1213  */
1214 noinline void kgdb_breakpoint(void)
1215 {
1216         atomic_inc(&kgdb_setting_breakpoint);
1217         wmb(); /* Sync point before breakpoint */
1218         arch_kgdb_breakpoint();
1219         wmb(); /* Sync point after breakpoint */
1220         atomic_dec(&kgdb_setting_breakpoint);
1221 }
1222 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
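/*
 * Typical use (illustrative only): a driver or test can programmatically
 * drop into the attached debugger at an interesting point, e.g.
 *
 *	if (something_went_wrong)
 *		kgdb_breakpoint();
 *
 * where something_went_wrong stands in for the caller's own check.
 */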
1223
1224 static int __init opt_kgdb_wait(char *str)
1225 {
1226         kgdb_break_asap = 1;
1227
1228         kdb_init(KDB_INIT_EARLY);
1229         if (kgdb_io_module_registered &&
1230             IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
1231                 kgdb_initial_breakpoint();
1232
1233         return 0;
1234 }
1235
1236 early_param("kgdbwait", opt_kgdb_wait);
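/*
 * Example (illustrative): to stop the kernel early in boot and wait for a
 * debugger to attach, combine kgdbwait with an I/O driver on the kernel
 * command line, e.g.
 *
 *	kgdboc=ttyS0,115200 kgdbwait
 *
 * The serial device and baud rate shown are placeholders.
 */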