#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

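/*
 * Boot with "debug-alternative" to get verbose output from the DPRINTK and
 * DUMP_BYTES helpers below; "noreplace-smp" keeps LOCK prefixes in place
 * even when only a single CPU is brought up.
 */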
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
		if (!(len))						\
			break;						\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
 * that correspond to that nop. To get from one nop to the next, we add to
 * the array an offset equal to the sum of the sizes of all preceding nops.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4,
	GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4,
	K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4,
	K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4,
	P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

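/*
 * arch_init_ideal_nops() below refines this choice once the boot CPU's
 * vendor and model are known.
 */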
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

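/* Table boundaries provided by the linker script (vmlinux.lds.S). */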
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

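/*
 * The displacement of a JMP in the replacement was encoded relative to the
 * replacement's own location. Re-encode it relative to the original
 * instruction, using the short 2-byte form when the new displacement fits
 * in a signed byte and padding the remainder with NOPs.
 */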
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_insn;
		else
			goto five_byte_insn;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_insn;
		else
			goto five_byte_insn;
	}

two_byte_insn:
	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_insn:
	*(s32 *)&insnbuf[1] = n_dspl;
	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
	unsigned long flags;
	int i;

	for (i = 0; i < a->padlen; i++) {
		if (instr[i] != 0x90)
			return;
	}

	local_irq_save(flags);
	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
	local_irq_restore(flags);

	DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
		   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative jump; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}

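/*
 * UP/SMP LOCK prefix patching: each entry in an __smp_locks table is a
 * 32-bit offset, relative to the entry itself, pointing at a LOCK prefix
 * byte. On a uniprocessor boot the prefix is rewritten to a harmless DS
 * segment override (0x3e); it is restored to LOCK (0xf0) if another CPU
 * ever comes online.
 */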
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

struct smp_alt_module {
	/* the module that owns these lock sites (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

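/*
 * Patch each recorded paravirt call site in place: pv_ops.init.patch() emits
 * the native or hypervisor-specific sequence into insnbuf, and the remainder
 * of the site is padded with NOPs.
 */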
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_ops.init.patch(p->instrtype, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

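/*
 * Boot-time entry point: apply feature alternatives, downgrade LOCK prefixes
 * if only one CPU will ever be online, and patch paravirt call sites.
 */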
void __init alternative_instructions(void)
{
	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the code to be patched.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Module text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

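/*
 * Temporary mm used for text poking: poking_init() sets up poking_mm and
 * picks poking_addr, the fixed address at which __text_poke() temporarily
 * maps the page(s) being modified.
 */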
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While the boot memory allocator is running we cannot use struct
	 * pages as they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	local_irq_save(flags);

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but using get_locked_pte() avoids
	 * open-coding the PTE lookup.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; the PTE is preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but that is not needed here since the mm is not in use at
	 * this point.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	pte_unmap_unlock(ptep, ptl);
	local_irq_restore(flags);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Context: should only be used by kgdb, which ensures no other core is
 *	    running, despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

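/*
 * Run on every CPU via on_each_cpu() to force instruction-stream
 * serialization after the text has been modified.
 */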
static void do_sync_core(void *info)
{
	sync_core();
}

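/* State shared between text_poke_bp() and the int3 exception handler below. */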
static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching_in_progress.
	 *
	 *	in_progress = TRUE		INT3
	 *	WMB				RMB
	 *	write INT3			if (in_progress)
	 *
	 * Idem for bp_int3_handler.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instructions by using an int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using the int3 breakpoint.
 *
 * The way it is done:
 *	- add an int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of the
 *	  replacing opcode
 *	- sync cores
 */
void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;

	lockdep_assert_held(&text_mutex);

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching_in_progress = false;
}