// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>

#include <asm/set_memory.h>
#include <asm/nospec-branch.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
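
/*
 * Worked example (illustration only): EMIT3(0x48, 0x89, 0xE5) packs the
 * three opcode bytes into a u32 as 0x00E58948, and emit_code() stores
 * them little-endian, so the image receives 48 89 E5, i.e.
 * 'mov rbp, rsp'. The _off32 variants append a 4-byte immediate or
 * displacement after the opcode bytes.
 */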
46 static bool is_imm8(int value)
48 return value <= 127 && value >= -128;
51 static bool is_simm32(s64 value)
53 return value == (s64)(s32)value;
56 static bool is_uimm32(u64 value)
58 return value == (u64)(u32)value;
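
/*
 * These predicates select instruction encodings. For example (illustration
 * only), ALU ops with an immediate use the 3-byte 'op r/m64, imm8' form
 * when is_imm8() holds (imm32 in -128..127) and fall back to a form with a
 * 4-byte immediate otherwise; see the BPF_ALU | BPF_K cases in do_jit().
 */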

/* Mov DST, SRC */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * Also x86-64 register R9 is unused. x86-64 register R10 is
 * used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
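
/*
 * Worked example (illustration only): EMIT_mov(AUX_REG, BPF_REG_1) is
 * 'mov r11, rdi'. add_2mod(0x48, AUX_REG, BPF_REG_1) sets REX.B for r11,
 * giving REX 0x49; add_2reg(0xC0, AUX_REG, BPF_REG_1) builds ModRM
 * 0xC0 + 3 + (7 << 3) = 0xFB; the emitted bytes are 49 89 FB.
 */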

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define PROLOGUE_SIZE		20

/*
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		EMIT2(0x6a, 0x00);
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	}
	*pprog = prog;
}
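
/*
 * Size check (illustration only): push rbp (1) + mov rbp,rsp (3) +
 * sub rsp,imm32 (7) + push rbx (1) + three 2-byte pushes of r13-r15 (6) +
 * 'push 0' 6A 00 (2) = 20 bytes, which is what the BUILD_BUG_ON above pins
 * as PROLOGUE_SIZE so that bpf_tail_call() can skip the prologue reliably.
 */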

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/*
	 * Wow we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	RETPOLINE_RAX_BPF_JIT();

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
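
/*
 * Note on the OFFSETn constants (illustration only): each labelN records
 * cnt right after its forward jump is emitted, and the BUILD_BUG_ONs above
 * verify at compile time that the hard-coded 8-bit displacements really
 * equal the number of bytes from that point to 'out'. Changing any emitted
 * sequence in this function requires re-counting OFFSET1..OFFSET3.
 */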

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
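
/*
 * Usage example (illustration only), for dst_reg = BPF_REG_0 (rax):
 *   emit_mov_imm32(&prog, true, BPF_REG_0, -1) -> 48 C7 C0 FF FF FF FF
 *   emit_mov_imm32(&prog, false, BPF_REG_0, 0) -> 31 C0 (xor eax, eax)
 *   emit_mov_imm32(&prog, false, BPF_REG_0, 1) -> B8 01 00 00 00
 */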

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
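
			/*
			 * Worked example (illustration only):
			 * BPF_ALU64 | BPF_ADD | BPF_X with dst_reg = BPF_REG_6
			 * (rbx) and src_reg = BPF_REG_7 (r13) emits REX
			 * 0x48 | 4 = 0x4C, opcode 0x01 and ModRM
			 * 0xC0 + 3 + (5 << 3) = 0xEB: bytes 4C 01 EB,
			 * i.e. 'add rbx, r13'.
			 */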

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
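
			/*
			 * Worked example (illustration only), dst_reg = rax:
			 *   add rax, 1      -> 48 83 C0 01       (imm8 form)
			 *   add rax, 0x1000 -> 48 05 00 10 00 00 (rax short form)
			 * and for dst_reg = rbx: 48 81 C3 00 10 00 00.
			 */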

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;
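
			/*
			 * Worked example (illustration only), dst_reg = rax:
			 * BPF_ALU64 | BPF_LSH with imm32 == 1 emits the
			 * shorter 48 D1 E0 ('shl rax, 1'); imm32 == 3 emits
			 * 48 C1 E0 03 ('shl rax, 3').
			 */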

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
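
			/*
			 * Worked example (illustration only) for the 32-bit
			 * BPF_FROM_BE case: dst_reg = BPF_REG_0 emits 0F C8
			 * ('bswap eax'); dst_reg = BPF_REG_8 (r14) needs a
			 * REX prefix and emits 41 0F CE ('bswap r14d').
			 */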

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* Emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* We have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* Emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* Emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* Emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* Emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/*
			 * If insn->off == 0 we can save one extra byte, but
			 * special case of x86 R13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
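
			/*
			 * Worked example (illustration only): a BPF_W xadd
			 * with dst_reg = BPF_REG_1 (rdi), src_reg = BPF_REG_0
			 * (eax) and off = 0 emits F0 01 47 00, i.e.
			 * 'lock add dword ptr [rdi + 0], eax'.
			 */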

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported BPF func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
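
			/*
			 * Near/far selection (illustration only): X86_JE is
			 * 0x74, so a short 'je' is 74 rel8 while the far form
			 * becomes 0F 84 rel32 (0x74 + 0x10 = 0x84), matching
			 * the opcode-table comment near the top of this file.
			 */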

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F); /* pop r15 */
			EMIT2(0x41, 0x5E); /* pop r14 */
			EMIT2(0x41, 0x5D); /* pop r13 */
			EMIT1(0x5B); /* pop rbx */
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;
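
			/*
			 * The epilogue mirrors emit_prologue(): the first,
			 * conditional 'pop rbx' (0x5B) only discards the
			 * tail_call_cnt slot pushed for eBPF programs, then
			 * r15/r14/r13/rbx are restored in reverse push order
			 * and 'leave' undoes the rbp/rsp frame setup.
			 */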

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
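
	/*
	 * For example (illustration only), a 2-insn program starts with
	 * addrs[] = { 64, 128, 192 } and cleanup_addr = 192; subsequent
	 * passes only ever shrink these estimates toward the real offsets.
	 */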
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}