// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
        if (len == 1)
                *ptr = bytes;
        else if (len == 2)
                *(u16 *)ptr = bytes;
        else {
                *(u32 *)ptr = bytes;
                barrier();
        }
        return ptr + len;
}

#define EMIT(bytes, len) \
        do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)               EMIT(b1, 1)
#define EMIT2(b1, b2)           EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)       EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
        do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
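
/*
 * Example of how the packing works: EMIT3(0x48, 0x89, 0xE5) builds the
 * u32 0x00E58948, which emit_code() stores little-endian, so the bytes
 * 0x48 0x89 0xE5 ("mov rbp, rsp") land in instruction-stream order.
 */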

static bool is_imm8(int value)
{
        return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
        return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
        return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)                                                               \
        do {                                                                             \
                if (DST != SRC)                                                          \
                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
        } while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4;
        else if (bpf_size == BPF_H)
                return 2;
        else if (bpf_size == BPF_B)
                return 1;
        else if (bpf_size == BPF_DW)
                return 4; /* imm32 */
        else
                return 0;
}

/*
 * List of x86 conditional jump opcodes (. + s8).
 * Add 0x10 to the opcode (plus an extra 0x0f prefix byte) to generate
 * far jumps (. + s32).
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of the BPF range for JIT-internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused: when used as a base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding, and it is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by the
 * BPF trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* RAX */
        [BPF_REG_1] = 7,  /* RDI */
        [BPF_REG_2] = 6,  /* RSI */
        [BPF_REG_3] = 2,  /* RDX */
        [BPF_REG_4] = 1,  /* RCX */
        [BPF_REG_5] = 0,  /* R8  */
        [BPF_REG_6] = 3,  /* RBX, callee saved */
        [BPF_REG_7] = 5,  /* R13, callee saved */
        [BPF_REG_8] = 6,  /* R14, callee saved */
        [BPF_REG_9] = 7,  /* R15, callee saved */
        [BPF_REG_FP] = 5, /* RBP, read-only */
        [BPF_REG_AX] = 2, /* R10, temp register */
        [AUX_REG] = 3,    /* R11, temp register */
        [X86_REG_R9] = 1, /* R9, 6th function argument */
};

static const int reg2pt_regs[] = {
        [BPF_REG_0] = offsetof(struct pt_regs, ax),
        [BPF_REG_1] = offsetof(struct pt_regs, di),
        [BPF_REG_2] = offsetof(struct pt_regs, si),
        [BPF_REG_3] = offsetof(struct pt_regs, dx),
        [BPF_REG_4] = offsetof(struct pt_regs, cx),
        [BPF_REG_5] = offsetof(struct pt_regs, r8),
        [BPF_REG_6] = offsetof(struct pt_regs, bx),
        [BPF_REG_7] = offsetof(struct pt_regs, r13),
        [BPF_REG_8] = offsetof(struct pt_regs, r14),
        [BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding.
 * rax, rcx, ..., rbp have simpler encodings.
 */
static bool is_ereg(u32 reg)
{
        return (1 << reg) & (BIT(BPF_REG_5) |
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
                             BIT(BPF_REG_9) |
                             BIT(X86_REG_R9) |
                             BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access the
 * x86-64 lower 8-bit registers dil, sil, bpl, spl, r8b..r15b, which need
 * an extra byte of encoding. al, cl, dl, bl have simpler encodings.
 */
static bool is_ereg_8l(u32 reg)
{
        return is_ereg(reg) ||
            (1 << reg) & (BIT(BPF_REG_1) |
                          BIT(BPF_REG_2) |
                          BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
        return reg == BPF_REG_0;
}

/* Add REX modifier bits if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
        if (is_ereg(reg))
                byte |= 1;
        return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
        if (is_ereg(r1))
                byte |= 1;
        if (is_ereg(r2))
                byte |= 4;
        return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
        return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
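
/*
 * Worked example: with mod bits 0xC0 (register-direct), dst_reg =
 * BPF_REG_0 (rax, hex 0) and src_reg = BPF_REG_1 (rdi, hex 7) give
 * add_2reg(0xC0, dst, src) == 0xF8, so EMIT_mov() above produces
 * 0x48 0x89 0xF8, i.e. "mov rax, rdi".
 */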

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
        [BPF_ADD] = 0x01,
        [BPF_SUB] = 0x29,
        [BPF_AND] = 0x21,
        [BPF_OR] = 0x09,
        [BPF_XOR] = 0x31,
        [BPF_LSH] = 0xE0,
        [BPF_RSH] = 0xE8,
        [BPF_ARSH] = 0xF8,
};
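
/*
 * Note that the ADD..XOR entries are genuine one-byte "op r/m, reg"
 * opcodes, while the LSH/RSH/ARSH entries are ModR/M bytes (opcode
 * extensions /4, /5 and /7) that get paired with the 0xC1/0xD1/0xD3
 * shift-group opcodes when shifts are emitted in do_jit() below.
 */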

static void jit_fill_hole(void *area, unsigned int size)
{
        /* Fill the whole space with INT3 instructions */
        memset(area, 0xcc, size);
}

struct jit_context {
        int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE       128
#define BPF_INSN_SAFETY         64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE          5
/* Number of bytes that will be skipped on a tail call */
#define X86_TAIL_CALL_OFFSET    11

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[0])
                EMIT1(0x53);         /* push rbx */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x55);   /* push r13 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x56);   /* push r14 */
        if (callee_regs_used[3])
                EMIT2(0x41, 0x57);   /* push r15 */
        *pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[3])
                EMIT2(0x41, 0x5F);   /* pop r15 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x5E);   /* pop r14 */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x5D);   /* pop r13 */
        if (callee_regs_used[0])
                EMIT1(0x5B);         /* pop rbx */
        *pprog = prog;
}

/*
 * Emit x86-64 prologue code for the BPF program.
 * The bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program.
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
                          bool tail_call_reachable, bool is_subprog)
{
        u8 *prog = *pprog;

        /* BPF trampoline can be made to work without these nops,
         * but let's waste 5 bytes for now and optimize later
         */
        memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
        prog += X86_PATCH_SIZE;
        if (!ebpf_from_cbpf) {
                if (tail_call_reachable && !is_subprog)
                        EMIT2(0x31, 0xC0); /* xor eax, eax */
                else
                        EMIT2(0x66, 0x90); /* nop2 */
        }
        EMIT1(0x55);             /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
        /* sub rsp, rounded_stack_depth */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
        if (tail_call_reachable)
                EMIT1(0x50);         /* push rax */
        *pprog = prog;
}
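
/*
 * For a main program the prologue emitted above is laid out as:
 *   5 bytes  nop5 (patched to a call by the BPF trampoline)
 *   2 bytes  xor eax, eax / nop2 (tail call count initialization)
 *   1 byte   push rbp
 *   3 bytes  mov rbp, rsp
 * i.e. 11 bytes total, which is where X86_TAIL_CALL_OFFSET comes from:
 * tail calls jump to bpf_func + 11 so the frame setup and the tail call
 * counter are not redone in the target program.
 */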

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
        u8 *prog = *pprog;
        s64 offset;

        offset = func - (ip + X86_PATCH_SIZE);
        if (!is_simm32(offset)) {
                pr_err("Target call %p is out of range\n", func);
                return -ERANGE;
        }
        EMIT1_off32(opcode, offset);
        *pprog = prog;
        return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
        return emit_patch(pprog, func, ip, 0xE9);
}
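
/*
 * 0xE8 is "call rel32" and 0xE9 is "jmp rel32"; both encode in 5 bytes
 * (X86_PATCH_SIZE) and take a displacement relative to the address of
 * the *next* instruction, hence the "ip + X86_PATCH_SIZE" above.
 */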

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                                void *old_addr, void *new_addr,
                                const bool text_live)
{
        const u8 *nop_insn = x86_nops[5];
        u8 old_insn[X86_PATCH_SIZE];
        u8 new_insn[X86_PATCH_SIZE];
        u8 *prog;
        int ret;

        memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
        if (old_addr) {
                prog = old_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, old_addr, ip) :
                      emit_jump(&prog, old_addr, ip);
                if (ret)
                        return ret;
        }

        memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
        if (new_addr) {
                prog = new_insn;
                ret = t == BPF_MOD_CALL ?
                      emit_call(&prog, new_addr, ip) :
                      emit_jump(&prog, new_addr, ip);
                if (ret)
                        return ret;
        }

        ret = -EBUSY;
        mutex_lock(&text_mutex);
        if (memcmp(ip, old_insn, X86_PATCH_SIZE))
                goto out;
        ret = 1;
        if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
                if (text_live)
                        text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
                else
                        memcpy(ip, new_insn, X86_PATCH_SIZE);
                ret = 0;
        }
out:
        mutex_unlock(&text_mutex);
        return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
{
        if (!is_kernel_text((long)ip) &&
            !is_bpf_text_address((long)ip))
                /* BPF poking in modules is not supported */
                return -EINVAL;

        return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

static int get_pop_bytes(bool *callee_regs_used)
{
        int bytes = 0;

        if (callee_regs_used[3])
                bytes += 2;
        if (callee_regs_used[2])
                bytes += 2;
        if (callee_regs_used[1])
                bytes += 2;
        if (callee_regs_used[0])
                bytes += 1;

        return bytes;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
                                        u32 stack_depth)
{
        int tcc_off = -4 - round_up(stack_depth, 8);
        u8 *prog = *pprog;
        int pop_bytes = 0;
        int off1 = 42;
        int off2 = 31;
        int off3 = 9;

        /* count the additional bytes used for popping callee regs from stack
         * that need to be taken into account for each of the offsets that
         * are used for bailing out of the tail call
         */
        pop_bytes = get_pop_bytes(callee_regs_used);
        off1 += pop_bytes;
        off2 += pop_bytes;
        off3 += pop_bytes;

        if (stack_depth) {
                off1 += 7;
                off2 += 7;
                off3 += 7;
        }

        /*
         * rdi - pointer to ctx
         * rsi - pointer to bpf_array
         * rdx - index in bpf_array
         */

        /*
         * if (index >= array->map.max_entries)
         *      goto out;
         */
        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

        /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
                    offsetof(struct bpf_array, ptrs));

        /*
         * if (prog == NULL)
         *      goto out;
         */
        EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
        EMIT2(X86_JE, OFFSET3);                   /* je out */

        *pprog = prog;
        pop_callee_regs(pprog, callee_regs_used);
        prog = *pprog;

        EMIT1(0x58);                              /* pop rax */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
                            round_up(stack_depth, 8));

        /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
        EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
              offsetof(struct bpf_prog, bpf_func));
        EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
              X86_TAIL_CALL_OFFSET);
        /*
         * Now we're ready to jump into the next BPF program:
         * rdi == ctx (1st arg)
         * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
         */
        RETPOLINE_RCX_BPF_JIT();

        /* out: */
        *pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
                                      u8 **pprog, int addr, u8 *image,
                                      bool *callee_regs_used, u32 stack_depth)
{
        int tcc_off = -4 - round_up(stack_depth, 8);
        u8 *prog = *pprog;
        int pop_bytes = 0;
        int off1 = 20;
        int poke_off;

        /* count the additional bytes used for popping callee regs from stack
         * that need to be taken into account for the jump offset that is
         * used for bailing out of the tail call when the limit is reached
         */
        pop_bytes = get_pop_bytes(callee_regs_used);
        off1 += pop_bytes;

        /*
         * total bytes for:
         * - nop5/ jmpq $off
         * - pop callee regs
         * - add rsp, $val if depth > 0
         * - pop rax
         */
        poke_off = X86_PATCH_SIZE + pop_bytes + 1;
        if (stack_depth) {
                poke_off += 7;
                off1 += 7;
        }

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         */
        EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
        EMIT2(X86_JA, off1);                          /* ja out */
        EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

        poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
        poke->adj_off = X86_TAIL_CALL_OFFSET;
        poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
        poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

        emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
                  poke->tailcall_bypass);

        *pprog = prog;
        pop_callee_regs(pprog, callee_regs_used);
        prog = *pprog;
        EMIT1(0x58);                                  /* pop rax */
        if (stack_depth)
                EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

        memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
        prog += X86_PATCH_SIZE;
        /* out: */

        *pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
        struct bpf_jit_poke_descriptor *poke;
        struct bpf_array *array;
        struct bpf_prog *target;
        int i, ret;

        for (i = 0; i < prog->aux->size_poke_tab; i++) {
                poke = &prog->aux->poke_tab[i];
                if (poke->aux && poke->aux != prog->aux)
                        continue;

                WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
                        continue;

                array = container_of(poke->tail_call.map, struct bpf_array, map);
                mutex_lock(&array->aux->poke_mutex);
                target = array->ptrs[poke->tail_call.key];
                if (target) {
                        /* Plain memcpy is used when the image is not live yet
                         * and still not locked as read-only. Once the poke
                         * location is active (poke->tailcall_target_stable),
                         * any parallel bpf_arch_text_poke() might still occur
                         * on the read-write image until we finally lock it as
                         * read-only. Both modifications of the given image
                         * are under text_mutex to avoid interference.
                         */
                        ret = __bpf_arch_text_poke(poke->tailcall_target,
                                                   BPF_MOD_JUMP, NULL,
                                                   (u8 *)target->bpf_func +
                                                   poke->adj_off, false);
                        BUG_ON(ret < 0);
                        ret = __bpf_arch_text_poke(poke->tailcall_bypass,
                                                   BPF_MOD_JUMP,
                                                   (u8 *)poke->tailcall_target +
                                                   X86_PATCH_SIZE, NULL, false);
                        BUG_ON(ret < 0);
                }
                WRITE_ONCE(poke->tailcall_target_stable, true);
                mutex_unlock(&array->aux->poke_mutex);
        }
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
                           u32 dst_reg, const u32 imm32)
{
        u8 *prog = *pprog;
        u8 b1, b2, b3;

        /*
         * Optimization: if imm32 is positive, use 'mov %eax, imm32'
         * (which zero-extends imm32) to save 2 bytes.
         */
        if (sign_propagate && (s32)imm32 < 0) {
                /* 'mov %rax, imm32' sign extends imm32 */
                b1 = add_1mod(0x48, dst_reg);
                b2 = 0xC7;
                b3 = 0xC0;
                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
                goto done;
        }

        /*
         * Optimization: if imm32 is zero, use 'xor %eax, %eax'
         * to save 3 bytes.
         */
        if (imm32 == 0) {
                if (is_ereg(dst_reg))
                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
                b2 = 0x31; /* xor */
                b3 = 0xC0;
                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
                goto done;
        }

        /* mov %eax, imm32 */
        if (is_ereg(dst_reg))
                EMIT1(add_1mod(0x40, dst_reg));
        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
        *pprog = prog;
}

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
                           const u32 imm32_hi, const u32 imm32_lo)
{
        u8 *prog = *pprog;

        if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
                /*
                 * For emitting a plain u32, where the sign bit must not be
                 * propagated, LLVM tends to load imm64 over mov32
                 * directly, so save a couple of bytes by just doing
                 * 'mov %eax, imm32' instead.
                 */
                emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
        } else {
                /* movabsq %rax, imm64 */
                EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
                EMIT(imm32_lo, 4);
                EMIT(imm32_hi, 4);
        }

        *pprog = prog;
}
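
/*
 * Example: loading the 64-bit constant 0xAABBCCDD takes the short
 * "mov r32, imm32" path above (5-6 bytes, upper half implicitly
 * zeroed), while 0x1122334455667788 needs the full 10-byte
 * "movabs r64, imm64" encoding.
 */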

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
        u8 *prog = *pprog;

        if (is64) {
                /* mov dst, src */
                EMIT_mov(dst_reg, src_reg);
        } else {
                /* mov32 dst, src */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT1(add_2mod(0x40, dst_reg, src_reg));
                EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
        }

        *pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
        u8 *prog = *pprog;

        if (is_imm8(off)) {
                /* 1-byte signed displacement.
                 *
                 * If off == 0 we could skip this and save one extra byte, but
                 * the special case of x86 R13, which always needs an offset,
                 * is not worth the hassle
                 */
                EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
        } else {
                /* 4-byte signed displacement */
                EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
        }
        *pprog = prog;
}
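
/*
 * Example: a BPF_DW load with dst = BPF_REG_0 (rax), src = BPF_REG_1
 * (rdi) and off = 8 gets the ModR/M byte 0x47 plus a disp8, so the
 * whole insn is 0x48 0x8B 0x47 0x08, i.e. "mov rax, [rdi + 8]"; an
 * offset outside the s8 range switches to the 0x80-based disp32 form.
 */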

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
        u8 *prog = *pprog;

        if (is64)
                EMIT1(add_2mod(0x48, dst_reg, src_reg));
        else if (is_ereg(dst_reg) || is_ereg(src_reg))
                EMIT1(add_2mod(0x40, dst_reg, src_reg));
        *pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;

        switch (size) {
        case BPF_B:
                /* Emit 'movzx rax, byte ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
                break;
        case BPF_H:
                /* Emit 'movzx rax, word ptr [rax + off]' */
                EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
                break;
        case BPF_W:
                /* Emit 'mov eax, dword ptr [rax + off]' */
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
                else
                        EMIT1(0x8B);
                break;
        case BPF_DW:
                /* Emit 'mov rax, qword ptr [rax + off]' */
                EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
                break;
        }
        emit_insn_suffix(&prog, src_reg, dst_reg, off);
        *pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
        u8 *prog = *pprog;

        switch (size) {
        case BPF_B:
                /* Emit 'mov byte ptr [rax + off], al' */
                if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
                        /* Add an extra byte for eregs or SIL, DIL, BPL in src_reg */
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
                else
                        EMIT1(0x88);
                break;
        case BPF_H:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT2(0x66, 0x89);
                break;
        case BPF_W:
                if (is_ereg(dst_reg) || is_ereg(src_reg))
                        EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
                else
                        EMIT1(0x89);
                break;
        case BPF_DW:
                EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
                break;
        }
        emit_insn_suffix(&prog, dst_reg, src_reg, off);
        *pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
                       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
        u8 *prog = *pprog;

        EMIT1(0xF0); /* lock prefix */

        maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

        /* emit opcode */
        switch (atomic_op) {
        case BPF_ADD:
        case BPF_SUB:
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
                /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
                EMIT1(simple_alu_opcodes[atomic_op]);
                break;
        case BPF_ADD | BPF_FETCH:
                /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
                EMIT2(0x0F, 0xC1);
                break;
        case BPF_XCHG:
                /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
                EMIT1(0x87);
                break;
        case BPF_CMPXCHG:
                /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
                EMIT2(0x0F, 0xB1);
                break;
        default:
                pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
                return -EFAULT;
        }

        emit_insn_suffix(&prog, dst_reg, src_reg, off);

        *pprog = prog;
        return 0;
}
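
/*
 * Example: BPF_ADD on BPF_DW emits "lock add qword ptr [dst + off], src"
 * (0xF0, REX.W, 0x01, ModR/M + disp). Note that CMPXCHG (0x0F 0xB1)
 * implicitly compares against and writes back rax, which is why the
 * BPF_CMPXCHG comment above maps it onto r0.
 */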

static bool ex_handler_bpf(const struct exception_table_entry *x,
                           struct pt_regs *regs, int trapnr,
                           unsigned long error_code, unsigned long fault_addr)
{
        u32 reg = x->fixup >> 8;

        /* jump over the faulting load and clear the dest register */
        *(unsigned long *)((void *)regs + reg) = 0;
        regs->ip += x->fixup & 0xff;
        return true;
}
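
/*
 * Example fixup: (4 | (offsetof(struct pt_regs, bx) << 8)) makes the
 * handler zero regs->bx and advance regs->ip past the 4-byte faulting
 * load. The encoding is constructed in do_jit() for BPF_PROBE_MEM.
 */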

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
                             bool *regs_used, bool *tail_call_seen)
{
        int i;

        for (i = 1; i <= insn_cnt; i++, insn++) {
                if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
                        *tail_call_seen = true;
                if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
                        regs_used[0] = true;
                if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
                        regs_used[1] = true;
                if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
                        regs_used[2] = true;
                if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
                        regs_used[3] = true;
        }
}

static void emit_nops(u8 **pprog, int len)
{
        u8 *prog = *pprog;
        int i, noplen;

        while (len > 0) {
                noplen = len;

                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;

                for (i = 0; i < noplen; i++)
                        EMIT1(x86_nops[noplen][i]);
                len -= noplen;
        }

        *pprog = prog;
}

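/*
 * Size difference between the insn as laid out in the previous pass
 * (addrs[i] - addrs[i - 1]) and what the current pass just emitted
 * (prog - temp); used when jmp_padding is requested to keep the image
 * layout stable across passes.
 */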
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
881
882 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
883                   int oldproglen, struct jit_context *ctx, bool jmp_padding)
884 {
885         bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
886         struct bpf_insn *insn = bpf_prog->insnsi;
887         bool callee_regs_used[4] = {};
888         int insn_cnt = bpf_prog->len;
889         bool tail_call_seen = false;
890         bool seen_exit = false;
891         u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
892         int i, excnt = 0;
893         int ilen, proglen = 0;
894         u8 *prog = temp;
895         int err;
896
897         detect_reg_usage(insn, insn_cnt, callee_regs_used,
898                          &tail_call_seen);
899
900         /* tail call's presence in current prog implies it is reachable */
901         tail_call_reachable |= tail_call_seen;
902
903         emit_prologue(&prog, bpf_prog->aux->stack_depth,
904                       bpf_prog_was_classic(bpf_prog), tail_call_reachable,
905                       bpf_prog->aux->func_idx != 0);
906         push_callee_regs(&prog, callee_regs_used);
907
908         ilen = prog - temp;
909         if (image)
910                 memcpy(image + proglen, temp, ilen);
911         proglen += ilen;
912         addrs[0] = proglen;
913         prog = temp;
914
915         for (i = 1; i <= insn_cnt; i++, insn++) {
916                 const s32 imm32 = insn->imm;
917                 u32 dst_reg = insn->dst_reg;
918                 u32 src_reg = insn->src_reg;
919                 u8 b2 = 0, b3 = 0;
920                 u8 *start_of_ldx;
921                 s64 jmp_offset;
922                 u8 jmp_cond;
923                 u8 *func;
924                 int nops;
925
926                 switch (insn->code) {
927                         /* ALU */
928                 case BPF_ALU | BPF_ADD | BPF_X:
929                 case BPF_ALU | BPF_SUB | BPF_X:
930                 case BPF_ALU | BPF_AND | BPF_X:
931                 case BPF_ALU | BPF_OR | BPF_X:
932                 case BPF_ALU | BPF_XOR | BPF_X:
933                 case BPF_ALU64 | BPF_ADD | BPF_X:
934                 case BPF_ALU64 | BPF_SUB | BPF_X:
935                 case BPF_ALU64 | BPF_AND | BPF_X:
936                 case BPF_ALU64 | BPF_OR | BPF_X:
937                 case BPF_ALU64 | BPF_XOR | BPF_X:
938                         maybe_emit_mod(&prog, dst_reg, src_reg,
939                                        BPF_CLASS(insn->code) == BPF_ALU64);
940                         b2 = simple_alu_opcodes[BPF_OP(insn->code)];
941                         EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
942                         break;
943
944                 case BPF_ALU64 | BPF_MOV | BPF_X:
945                 case BPF_ALU | BPF_MOV | BPF_X:
946                         emit_mov_reg(&prog,
947                                      BPF_CLASS(insn->code) == BPF_ALU64,
948                                      dst_reg, src_reg);
949                         break;
950
951                         /* neg dst */
952                 case BPF_ALU | BPF_NEG:
953                 case BPF_ALU64 | BPF_NEG:
954                         if (BPF_CLASS(insn->code) == BPF_ALU64)
955                                 EMIT1(add_1mod(0x48, dst_reg));
956                         else if (is_ereg(dst_reg))
957                                 EMIT1(add_1mod(0x40, dst_reg));
958                         EMIT2(0xF7, add_1reg(0xD8, dst_reg));
959                         break;
960
961                 case BPF_ALU | BPF_ADD | BPF_K:
962                 case BPF_ALU | BPF_SUB | BPF_K:
963                 case BPF_ALU | BPF_AND | BPF_K:
964                 case BPF_ALU | BPF_OR | BPF_K:
965                 case BPF_ALU | BPF_XOR | BPF_K:
966                 case BPF_ALU64 | BPF_ADD | BPF_K:
967                 case BPF_ALU64 | BPF_SUB | BPF_K:
968                 case BPF_ALU64 | BPF_AND | BPF_K:
969                 case BPF_ALU64 | BPF_OR | BPF_K:
970                 case BPF_ALU64 | BPF_XOR | BPF_K:
971                         if (BPF_CLASS(insn->code) == BPF_ALU64)
972                                 EMIT1(add_1mod(0x48, dst_reg));
973                         else if (is_ereg(dst_reg))
974                                 EMIT1(add_1mod(0x40, dst_reg));
975
976                         /*
977                          * b3 holds 'normal' opcode, b2 short form only valid
978                          * in case dst is eax/rax.
979                          */
980                         switch (BPF_OP(insn->code)) {
981                         case BPF_ADD:
982                                 b3 = 0xC0;
983                                 b2 = 0x05;
984                                 break;
985                         case BPF_SUB:
986                                 b3 = 0xE8;
987                                 b2 = 0x2D;
988                                 break;
989                         case BPF_AND:
990                                 b3 = 0xE0;
991                                 b2 = 0x25;
992                                 break;
993                         case BPF_OR:
994                                 b3 = 0xC8;
995                                 b2 = 0x0D;
996                                 break;
997                         case BPF_XOR:
998                                 b3 = 0xF0;
999                                 b2 = 0x35;
1000                                 break;
1001                         }
1002
1003                         if (is_imm8(imm32))
1004                                 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1005                         else if (is_axreg(dst_reg))
1006                                 EMIT1_off32(b2, imm32);
1007                         else
1008                                 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1009                         break;
1010
1011                 case BPF_ALU64 | BPF_MOV | BPF_K:
1012                 case BPF_ALU | BPF_MOV | BPF_K:
1013                         emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1014                                        dst_reg, imm32);
1015                         break;
1016
1017                 case BPF_LD | BPF_IMM | BPF_DW:
1018                         emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1019                         insn++;
1020                         i++;
1021                         break;
1022
1023                         /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1024                 case BPF_ALU | BPF_MOD | BPF_X:
1025                 case BPF_ALU | BPF_DIV | BPF_X:
1026                 case BPF_ALU | BPF_MOD | BPF_K:
1027                 case BPF_ALU | BPF_DIV | BPF_K:
1028                 case BPF_ALU64 | BPF_MOD | BPF_X:
1029                 case BPF_ALU64 | BPF_DIV | BPF_X:
1030                 case BPF_ALU64 | BPF_MOD | BPF_K:
1031                 case BPF_ALU64 | BPF_DIV | BPF_K:
1032                         EMIT1(0x50); /* push rax */
1033                         EMIT1(0x52); /* push rdx */
1034
1035                         if (BPF_SRC(insn->code) == BPF_X)
1036                                 /* mov r11, src_reg */
1037                                 EMIT_mov(AUX_REG, src_reg);
1038                         else
1039                                 /* mov r11, imm32 */
1040                                 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1041
1042                         /* mov rax, dst_reg */
1043                         EMIT_mov(BPF_REG_0, dst_reg);
1044
1045                         /*
1046                          * xor edx, edx
1047                          * equivalent to 'xor rdx, rdx', but one byte less
1048                          */
1049                         EMIT2(0x31, 0xd2);
1050
1051                         if (BPF_CLASS(insn->code) == BPF_ALU64)
1052                                 /* div r11 */
1053                                 EMIT3(0x49, 0xF7, 0xF3);
1054                         else
1055                                 /* div r11d */
1056                                 EMIT3(0x41, 0xF7, 0xF3);
1057
1058                         if (BPF_OP(insn->code) == BPF_MOD)
1059                                 /* mov r11, rdx */
1060                                 EMIT3(0x49, 0x89, 0xD3);
1061                         else
1062                                 /* mov r11, rax */
1063                                 EMIT3(0x49, 0x89, 0xC3);
1064
1065                         EMIT1(0x5A); /* pop rdx */
1066                         EMIT1(0x58); /* pop rax */
1067
1068                         /* mov dst_reg, r11 */
1069                         EMIT_mov(dst_reg, AUX_REG);
1070                         break;
1071
1072                 case BPF_ALU | BPF_MUL | BPF_K:
1073                 case BPF_ALU | BPF_MUL | BPF_X:
1074                 case BPF_ALU64 | BPF_MUL | BPF_K:
1075                 case BPF_ALU64 | BPF_MUL | BPF_X:
1076                 {
1077                         bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1078
1079                         if (dst_reg != BPF_REG_0)
1080                                 EMIT1(0x50); /* push rax */
1081                         if (dst_reg != BPF_REG_3)
1082                                 EMIT1(0x52); /* push rdx */
1083
1084                         /* mov r11, dst_reg */
1085                         EMIT_mov(AUX_REG, dst_reg);
1086
1087                         if (BPF_SRC(insn->code) == BPF_X)
1088                                 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1089                         else
1090                                 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1091
1092                         if (is64)
1093                                 EMIT1(add_1mod(0x48, AUX_REG));
1094                         else if (is_ereg(AUX_REG))
1095                                 EMIT1(add_1mod(0x40, AUX_REG));
1096                         /* mul(q) r11 */
1097                         EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1098
1099                         if (dst_reg != BPF_REG_3)
1100                                 EMIT1(0x5A); /* pop rdx */
1101                         if (dst_reg != BPF_REG_0) {
1102                                 /* mov dst_reg, rax */
1103                                 EMIT_mov(dst_reg, BPF_REG_0);
1104                                 EMIT1(0x58); /* pop rax */
1105                         }
1106                         break;
1107                 }
1108                         /* Shifts */
1109                 case BPF_ALU | BPF_LSH | BPF_K:
1110                 case BPF_ALU | BPF_RSH | BPF_K:
1111                 case BPF_ALU | BPF_ARSH | BPF_K:
1112                 case BPF_ALU64 | BPF_LSH | BPF_K:
1113                 case BPF_ALU64 | BPF_RSH | BPF_K:
1114                 case BPF_ALU64 | BPF_ARSH | BPF_K:
1115                         if (BPF_CLASS(insn->code) == BPF_ALU64)
1116                                 EMIT1(add_1mod(0x48, dst_reg));
1117                         else if (is_ereg(dst_reg))
1118                                 EMIT1(add_1mod(0x40, dst_reg));
1119
1120                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1121                         if (imm32 == 1)
1122                                 EMIT2(0xD1, add_1reg(b3, dst_reg));
1123                         else
1124                                 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1125                         break;
1126
1127                 case BPF_ALU | BPF_LSH | BPF_X:
1128                 case BPF_ALU | BPF_RSH | BPF_X:
1129                 case BPF_ALU | BPF_ARSH | BPF_X:
1130                 case BPF_ALU64 | BPF_LSH | BPF_X:
1131                 case BPF_ALU64 | BPF_RSH | BPF_X:
1132                 case BPF_ALU64 | BPF_ARSH | BPF_X:
1133
1134                         /* Check for bad case when dst_reg == rcx */
1135                         if (dst_reg == BPF_REG_4) {
1136                                 /* mov r11, dst_reg */
1137                                 EMIT_mov(AUX_REG, dst_reg);
1138                                 dst_reg = AUX_REG;
1139                         }
1140
1141                         if (src_reg != BPF_REG_4) { /* common case */
1142                                 EMIT1(0x51); /* push rcx */
1143
1144                                 /* mov rcx, src_reg */
1145                                 EMIT_mov(BPF_REG_4, src_reg);
1146                         }
1147
1148                         /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1149                         if (BPF_CLASS(insn->code) == BPF_ALU64)
1150                                 EMIT1(add_1mod(0x48, dst_reg));
1151                         else if (is_ereg(dst_reg))
1152                                 EMIT1(add_1mod(0x40, dst_reg));
1153
1154                         b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1155                         EMIT2(0xD3, add_1reg(b3, dst_reg));
1156
1157                         if (src_reg != BPF_REG_4)
1158                                 EMIT1(0x59); /* pop rcx */
1159
1160                         if (insn->dst_reg == BPF_REG_4)
1161                                 /* mov dst_reg, r11 */
1162                                 EMIT_mov(insn->dst_reg, AUX_REG);
1163                         break;
1164
1165                 case BPF_ALU | BPF_END | BPF_FROM_BE:
1166                         switch (imm32) {
1167                         case 16:
1168                                 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1169                                 EMIT1(0x66);
1170                                 if (is_ereg(dst_reg))
1171                                         EMIT1(0x41);
1172                                 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1173
1174                                 /* Emit 'movzwl eax, ax' */
1175                                 if (is_ereg(dst_reg))
1176                                         EMIT3(0x45, 0x0F, 0xB7);
1177                                 else
1178                                         EMIT2(0x0F, 0xB7);
1179                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1180                                 break;
1181                         case 32:
1182                                 /* Emit 'bswap eax' to swap lower 4 bytes */
1183                                 if (is_ereg(dst_reg))
1184                                         EMIT2(0x41, 0x0F);
1185                                 else
1186                                         EMIT1(0x0F);
1187                                 EMIT1(add_1reg(0xC8, dst_reg));
1188                                 break;
1189                         case 64:
1190                                 /* Emit 'bswap rax' to swap 8 bytes */
1191                                 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1192                                       add_1reg(0xC8, dst_reg));
1193                                 break;
1194                         }
1195                         break;
1196
1197                 case BPF_ALU | BPF_END | BPF_FROM_LE:
1198                         switch (imm32) {
1199                         case 16:
1200                                 /*
1201                                  * Emit 'movzwl eax, ax' to zero extend 16-bit
1202                                  * into 64 bit
1203                                  */
1204                                 if (is_ereg(dst_reg))
1205                                         EMIT3(0x45, 0x0F, 0xB7);
1206                                 else
1207                                         EMIT2(0x0F, 0xB7);
1208                                 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1209                                 break;
1210                         case 32:
1211                                 /* Emit 'mov eax, eax' to clear upper 32-bits */
1212                                 if (is_ereg(dst_reg))
1213                                         EMIT1(0x45);
1214                                 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1215                                 break;
1216                         case 64:
1217                                 /* nop */
1218                                 break;
1219                         }
1220                         break;
1221
1222                         /* ST: *(u8*)(dst_reg + off) = imm */
1223                 case BPF_ST | BPF_MEM | BPF_B:
1224                         if (is_ereg(dst_reg))
1225                                 EMIT2(0x41, 0xC6);
1226                         else
1227                                 EMIT1(0xC6);
1228                         goto st;
1229                 case BPF_ST | BPF_MEM | BPF_H:
1230                         if (is_ereg(dst_reg))
1231                                 EMIT3(0x66, 0x41, 0xC7);
1232                         else
1233                                 EMIT2(0x66, 0xC7);
1234                         goto st;
1235                 case BPF_ST | BPF_MEM | BPF_W:
1236                         if (is_ereg(dst_reg))
1237                                 EMIT2(0x41, 0xC7);
1238                         else
1239                                 EMIT1(0xC7);
1240                         goto st;
1241                 case BPF_ST | BPF_MEM | BPF_DW:
1242                         EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1243
1244 st:                     if (is_imm8(insn->off))
1245                                 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1246                         else
1247                                 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1248
1249                         EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1250                         break;
1251
1252                         /* STX: *(u8*)(dst_reg + off) = src_reg */
1253                 case BPF_STX | BPF_MEM | BPF_B:
1254                 case BPF_STX | BPF_MEM | BPF_H:
1255                 case BPF_STX | BPF_MEM | BPF_W:
1256                 case BPF_STX | BPF_MEM | BPF_DW:
1257                         emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1258                         break;
1259
1260                         /* LDX: dst_reg = *(u8*)(src_reg + off) */
1261                 case BPF_LDX | BPF_MEM | BPF_B:
1262                 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1263                 case BPF_LDX | BPF_MEM | BPF_H:
1264                 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1265                 case BPF_LDX | BPF_MEM | BPF_W:
1266                 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1267                 case BPF_LDX | BPF_MEM | BPF_DW:
1268                 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1269                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1270                                 /* test src_reg, src_reg */
1271                                 maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
1272                                 EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
1273                                 /* jne start_of_ldx */
1274                                 EMIT2(X86_JNE, 0);
1275                                 /* xor dst_reg, dst_reg */
1276                                 emit_mov_imm32(&prog, false, dst_reg, 0);
1277                                 /* jmp byte_after_ldx */
1278                                 EMIT2(0xEB, 0);
1279
1280                                 /* populate jmp_offset for JNE above */
1281                                 temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
1282                                 start_of_ldx = prog;
1283                         }
1284                         emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1285                         if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1286                                 struct exception_table_entry *ex;
1287                                 u8 *_insn = image + proglen + (start_of_ldx - temp);
1288                                 s64 delta;
1289
1290                                 /* populate jmp_offset for JMP above */
1291                                 start_of_ldx[-1] = prog - start_of_ldx;
1292
1293                                 if (!bpf_prog->aux->extable)
1294                                         break;
1295
1296                                 if (excnt >= bpf_prog->aux->num_exentries) {
1297                                         pr_err("ex gen bug\n");
1298                                         return -EFAULT;
1299                                 }
1300                                 ex = &bpf_prog->aux->extable[excnt++];
1301
1302                                 delta = _insn - (u8 *)&ex->insn;
1303                                 if (!is_simm32(delta)) {
1304                                         pr_err("extable->insn doesn't fit into 32-bit\n");
1305                                         return -EFAULT;
1306                                 }
1307                                 ex->insn = delta;
1308
1309                                 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1310                                 if (!is_simm32(delta)) {
1311                                         pr_err("extable->handler doesn't fit into 32-bit\n");
1312                                         return -EFAULT;
1313                                 }
1314                                 ex->handler = delta;
1315
1316                                 if (dst_reg > BPF_REG_9) {
1317                                         pr_err("verifier error\n");
1318                                         return -EFAULT;
1319                                 }
1320                                 /*
1321                                  * Compute size of x86 insn and its target dest x86 register.
1322                                  * ex_handler_bpf() will use lower 8 bits to adjust
1323                                  * pt_regs->ip to jump over this x86 instruction
1324                                  * and upper bits to figure out which pt_regs to zero out.
1325                                  * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1326                                  * of 4 bytes will be ignored and rbx will be zero inited.
1327                                  */
1328                                 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1329                         }
1330                         break;
1331
1332                 case BPF_STX | BPF_ATOMIC | BPF_W:
1333                 case BPF_STX | BPF_ATOMIC | BPF_DW:
1334                         if (insn->imm == (BPF_AND | BPF_FETCH) ||
1335                             insn->imm == (BPF_OR | BPF_FETCH) ||
1336                             insn->imm == (BPF_XOR | BPF_FETCH)) {
1337                                 u8 *branch_target;
1338                                 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1339                                 u32 real_src_reg = src_reg;
1340
1341                                 /*
1342                                  * Can't be implemented with a single x86 insn.
1343                                  * Need to do a CMPXCHG loop.
1344                                  */
1345
1346                                 /* Will need RAX as a CMPXCHG operand so save R0 */
1347                                 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1348                                 if (src_reg == BPF_REG_0)
1349                                         real_src_reg = BPF_REG_AX;
1350
1351                                 branch_target = prog;
1352                                 /* Load old value */
1353                                 emit_ldx(&prog, BPF_SIZE(insn->code),
1354                                          BPF_REG_0, dst_reg, insn->off);
1355                                 /*
1356                                  * Perform the (commutative) operation locally,
1357                                  * put the result in the AUX_REG.
1358                                  */
1359                                 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1360                                 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1361                                 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1362                                       add_2reg(0xC0, AUX_REG, real_src_reg));
1363                                 /* Attempt to swap in new value */
1364                                 err = emit_atomic(&prog, BPF_CMPXCHG,
1365                                                   dst_reg, AUX_REG, insn->off,
1366                                                   BPF_SIZE(insn->code));
1367                                 if (WARN_ON(err))
1368                                         return err;
1369                                 /*
1370                                  * ZF tells us whether we won the race. If it's
1371                                  * cleared we need to try again.
1372                                  */
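                                    /* rel8 is relative to the end of this 2-byte jne, hence the extra -2 */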
1373                                 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1374                                 /* Return the pre-modification value */
1375                                 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1376                                 /* Restore R0 after clobbering RAX */
1377                                 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1378                                 break;
1379
1380                         }
1381
1382                         err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1383                                                   insn->off, BPF_SIZE(insn->code));
1384                         if (err)
1385                                 return err;
1386                         break;
1387
1388                         /* call */
1389                 case BPF_JMP | BPF_CALL:
1390                         func = (u8 *) __bpf_call_base + imm32;
1391                         if (tail_call_reachable) {
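                                    /* Reload what the prologue stashed at [rbp - (stack_depth + 8)]
                                     * (the tail call count) into rax before the call (sketch of intent):
                                     *   mov rax, qword ptr [rbp - (stack_depth + 8)]
                                     * The "+ 7" in the call target below skips this 7-byte mov.
                                     */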
1392                                 EMIT3_off32(0x48, 0x8B, 0x85,
1393                                             -(bpf_prog->aux->stack_depth + 8));
1394                                 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1395                                         return -EINVAL;
1396                         } else {
1397                                 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1398                                         return -EINVAL;
1399                         }
1400                         break;
1401
1402                 case BPF_JMP | BPF_TAIL_CALL:
1403                         if (imm32)
1404                                 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1405                                                           &prog, addrs[i], image,
1406                                                           callee_regs_used,
1407                                                           bpf_prog->aux->stack_depth);
1408                         else
1409                                 emit_bpf_tail_call_indirect(&prog,
1410                                                             callee_regs_used,
1411                                                             bpf_prog->aux->stack_depth);
1412                         break;
1413
1414                         /* cond jump */
1415                 case BPF_JMP | BPF_JEQ | BPF_X:
1416                 case BPF_JMP | BPF_JNE | BPF_X:
1417                 case BPF_JMP | BPF_JGT | BPF_X:
1418                 case BPF_JMP | BPF_JLT | BPF_X:
1419                 case BPF_JMP | BPF_JGE | BPF_X:
1420                 case BPF_JMP | BPF_JLE | BPF_X:
1421                 case BPF_JMP | BPF_JSGT | BPF_X:
1422                 case BPF_JMP | BPF_JSLT | BPF_X:
1423                 case BPF_JMP | BPF_JSGE | BPF_X:
1424                 case BPF_JMP | BPF_JSLE | BPF_X:
1425                 case BPF_JMP32 | BPF_JEQ | BPF_X:
1426                 case BPF_JMP32 | BPF_JNE | BPF_X:
1427                 case BPF_JMP32 | BPF_JGT | BPF_X:
1428                 case BPF_JMP32 | BPF_JLT | BPF_X:
1429                 case BPF_JMP32 | BPF_JGE | BPF_X:
1430                 case BPF_JMP32 | BPF_JLE | BPF_X:
1431                 case BPF_JMP32 | BPF_JSGT | BPF_X:
1432                 case BPF_JMP32 | BPF_JSLT | BPF_X:
1433                 case BPF_JMP32 | BPF_JSGE | BPF_X:
1434                 case BPF_JMP32 | BPF_JSLE | BPF_X:
1435                         /* cmp dst_reg, src_reg */
1436                         maybe_emit_mod(&prog, dst_reg, src_reg,
1437                                        BPF_CLASS(insn->code) == BPF_JMP);
1438                         EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1439                         goto emit_cond_jmp;
1440
1441                 case BPF_JMP | BPF_JSET | BPF_X:
1442                 case BPF_JMP32 | BPF_JSET | BPF_X:
1443                         /* test dst_reg, src_reg */
1444                         maybe_emit_mod(&prog, dst_reg, src_reg,
1445                                        BPF_CLASS(insn->code) == BPF_JMP);
1446                         EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1447                         goto emit_cond_jmp;
1448
1449                 case BPF_JMP | BPF_JSET | BPF_K:
1450                 case BPF_JMP32 | BPF_JSET | BPF_K:
1451                         /* test dst_reg, imm32 */
1452                         if (BPF_CLASS(insn->code) == BPF_JMP)
1453                                 EMIT1(add_1mod(0x48, dst_reg));
1454                         else if (is_ereg(dst_reg))
1455                                 EMIT1(add_1mod(0x40, dst_reg));
1456                         EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1457                         goto emit_cond_jmp;
1458
1459                 case BPF_JMP | BPF_JEQ | BPF_K:
1460                 case BPF_JMP | BPF_JNE | BPF_K:
1461                 case BPF_JMP | BPF_JGT | BPF_K:
1462                 case BPF_JMP | BPF_JLT | BPF_K:
1463                 case BPF_JMP | BPF_JGE | BPF_K:
1464                 case BPF_JMP | BPF_JLE | BPF_K:
1465                 case BPF_JMP | BPF_JSGT | BPF_K:
1466                 case BPF_JMP | BPF_JSLT | BPF_K:
1467                 case BPF_JMP | BPF_JSGE | BPF_K:
1468                 case BPF_JMP | BPF_JSLE | BPF_K:
1469                 case BPF_JMP32 | BPF_JEQ | BPF_K:
1470                 case BPF_JMP32 | BPF_JNE | BPF_K:
1471                 case BPF_JMP32 | BPF_JGT | BPF_K:
1472                 case BPF_JMP32 | BPF_JLT | BPF_K:
1473                 case BPF_JMP32 | BPF_JGE | BPF_K:
1474                 case BPF_JMP32 | BPF_JLE | BPF_K:
1475                 case BPF_JMP32 | BPF_JSGT | BPF_K:
1476                 case BPF_JMP32 | BPF_JSLT | BPF_K:
1477                 case BPF_JMP32 | BPF_JSGE | BPF_K:
1478                 case BPF_JMP32 | BPF_JSLE | BPF_K:
1479                         /* test dst_reg, dst_reg to save one extra byte */
1480                         if (imm32 == 0) {
1481                                 maybe_emit_mod(&prog, dst_reg, dst_reg,
1482                                                BPF_CLASS(insn->code) == BPF_JMP);
1483                                 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1484                                 goto emit_cond_jmp;
1485                         }
1486
1487                         /* cmp dst_reg, imm8/32 */
1488                         if (BPF_CLASS(insn->code) == BPF_JMP)
1489                                 EMIT1(add_1mod(0x48, dst_reg));
1490                         else if (is_ereg(dst_reg))
1491                                 EMIT1(add_1mod(0x40, dst_reg));
1492
1493                         if (is_imm8(imm32))
1494                                 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1495                         else
1496                                 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1497
1498 emit_cond_jmp:          /* Convert BPF opcode to x86 */
1499                         switch (BPF_OP(insn->code)) {
1500                         case BPF_JEQ:
1501                                 jmp_cond = X86_JE;
1502                                 break;
1503                         case BPF_JSET:
1504                         case BPF_JNE:
1505                                 jmp_cond = X86_JNE;
1506                                 break;
1507                         case BPF_JGT:
1508                                 /* GT is unsigned '>', JA in x86 */
1509                                 jmp_cond = X86_JA;
1510                                 break;
1511                         case BPF_JLT:
1512                                 /* LT is unsigned '<', JB in x86 */
1513                                 jmp_cond = X86_JB;
1514                                 break;
1515                         case BPF_JGE:
1516                                 /* GE is unsigned '>=', JAE in x86 */
1517                                 jmp_cond = X86_JAE;
1518                                 break;
1519                         case BPF_JLE:
1520                                 /* LE is unsigned '<=', JBE in x86 */
1521                                 jmp_cond = X86_JBE;
1522                                 break;
1523                         case BPF_JSGT:
1524                                 /* Signed '>', GT in x86 */
1525                                 jmp_cond = X86_JG;
1526                                 break;
1527                         case BPF_JSLT:
1528                                 /* Signed '<', LT in x86 */
1529                                 jmp_cond = X86_JL;
1530                                 break;
1531                         case BPF_JSGE:
1532                                 /* Signed '>=', GE in x86 */
1533                                 jmp_cond = X86_JGE;
1534                                 break;
1535                         case BPF_JSLE:
1536                                 /* Signed '<=', LE in x86 */
1537                                 jmp_cond = X86_JLE;
1538                                 break;
1539                         default: /* to silence GCC warning */
1540                                 return -EFAULT;
1541                         }
1542                         jmp_offset = addrs[i + insn->off] - addrs[i];
1543                         if (is_imm8(jmp_offset)) {
1544                                 if (jmp_padding) {
1545                                         /* To keep the jmp_offset valid, the extra bytes are
1546                                          * padded before the jump insn, so we subtract the
1547                                          * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1548                                          *
1549                                          * If the previous pass already emits an imm8
1550                                          * jmp_cond, then this BPF insn won't shrink, so
1551                                          * "nops" is 0.
1552                                          *
1553                                          * On the other hand, if the previous pass emits an
1554                                          * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1555                                          * keep the image from shrinking further.
1556                                          *
1557                                          * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1558                                          *     is 2 bytes, so the size difference is 4 bytes.
1559                                          */
1560                                         nops = INSN_SZ_DIFF - 2;
1561                                         if (nops != 0 && nops != 4) {
1562                                                 pr_err("unexpected jmp_cond padding: %d bytes\n",
1563                                                        nops);
1564                                                 return -EFAULT;
1565                                         }
1566                                         emit_nops(&prog, nops);
1567                                 }
1568                                 EMIT2(jmp_cond, jmp_offset);
1569                         } else if (is_simm32(jmp_offset)) {
1570                                 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1571                         } else {
1572                                 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1573                                 return -EFAULT;
1574                         }
1575
1576                         break;
1577
1578                 case BPF_JMP | BPF_JA:
1579                         if (insn->off == -1)
1580                                 /* -1 jmp instructions will always jump
1581                                  * backwards two bytes. Explicitly handling
1582                                  * this case avoids wasting too many passes
1583                                  * when there are long sequences of replaced
1584                                  * dead code.
1585                                  */
1586                                 jmp_offset = -2;
1587                         else
1588                                 jmp_offset = addrs[i + insn->off] - addrs[i];
1589
1590                         if (!jmp_offset) {
1591                                 /*
1592                                  * If jmp_padding is enabled, the extra nops will
1593                                  * be inserted. Otherwise, optimize out nop jumps.
1594                                  */
1595                                 if (jmp_padding) {
1596                                         /* There are 3 possible conditions.
1597                                          * (1) This BPF_JA is already optimized out in
1598                                          *     the previous run, so there is no need
1599                                          *     to pad any extra byte (0 byte).
1600                                          * (2) The previous pass emits an imm8 jmp,
1601                                          *     so we pad 2 bytes to match the previous
1602                                          *     insn size.
1603                                          * (3) Similarly, the previous pass emits an
1604                                          *     imm32 jmp, and 5 bytes are padded.
1605                                          */
1606                                         nops = INSN_SZ_DIFF;
1607                                         if (nops != 0 && nops != 2 && nops != 5) {
1608                                                 pr_err("unexpected nop jump padding: %d bytes\n",
1609                                                        nops);
1610                                                 return -EFAULT;
1611                                         }
1612                                         emit_nops(&prog, nops);
1613                                 }
1614                                 break;
1615                         }
1616 emit_jmp:
1617                         if (is_imm8(jmp_offset)) {
1618                                 if (jmp_padding) {
1619                                         /* To avoid breaking jmp_offset, the extra bytes
1620                                          * are padded before the actual jmp insn, so
1621                                          * 2 bytes is subtracted from INSN_SZ_DIFF.
1622                                          *
1623                                          * If the previous pass already emits an imm8
1624                                          * jmp, there is nothing to pad (0 byte).
1625                                          *
1626                                          * If it emits an imm32 jmp (5 bytes) previously
1627                                          * and now an imm8 jmp (2 bytes), then we pad
1628                                          * (5 - 2 = 3) bytes to stop the image from
1629                                          * shrinking further.
1630                                          */
1631                                         nops = INSN_SZ_DIFF - 2;
1632                                         if (nops != 0 && nops != 3) {
1633                                                 pr_err("unexpected jump padding: %d bytes\n",
1634                                                        nops);
1635                                                 return -EFAULT;
1636                                         }
1637                                         emit_nops(&prog, nops);
1638                                 }
1639                                 EMIT2(0xEB, jmp_offset);
1640                         } else if (is_simm32(jmp_offset)) {
1641                                 EMIT1_off32(0xE9, jmp_offset);
1642                         } else {
1643                                 pr_err("jmp gen bug %llx\n", jmp_offset);
1644                                 return -EFAULT;
1645                         }
1646                         break;
1647
1648                 case BPF_JMP | BPF_EXIT:
1649                         if (seen_exit) {
1650                                 jmp_offset = ctx->cleanup_addr - addrs[i];
1651                                 goto emit_jmp;
1652                         }
1653                         seen_exit = true;
1654                         /* Update cleanup_addr */
1655                         ctx->cleanup_addr = proglen;
1656                         pop_callee_regs(&prog, callee_regs_used);
1657                         EMIT1(0xC9);         /* leave */
1658                         EMIT1(0xC3);         /* ret */
1659                         break;
1660
1661                 default:
1662                         /*
1663                          * By design x86-64 JIT should support all BPF instructions.
1664                          * This error will be seen if new instruction was added
1665                          * to the interpreter, but not to the JIT, or if there is
1666                          * junk in bpf_prog.
1667                          */
1668                         pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1669                         return -EINVAL;
1670                 }
1671
1672                 ilen = prog - temp;
1673                 if (ilen > BPF_MAX_INSN_SIZE) {
1674                         pr_err("bpf_jit: fatal insn size error\n");
1675                         return -EFAULT;
1676                 }
1677
1678                 if (image) {
1679                         /*
1680                          * When populating the image, assert that:
1681                          *
1682                          *  i) We do not write beyond the allocated space, and
1683                          * ii) addrs[i] did not change from the prior run, in order
1684                          *     to validate assumptions made for computing branch
1685                          *     displacements.
1686                          */
1687                         if (unlikely(proglen + ilen > oldproglen ||
1688                                      proglen + ilen != addrs[i])) {
1689                                 pr_err("bpf_jit: fatal error\n");
1690                                 return -EFAULT;
1691                         }
1692                         memcpy(image + proglen, temp, ilen);
1693                 }
1694                 proglen += ilen;
1695                 addrs[i] = proglen;
1696                 prog = temp;
1697         }
1698
1699         if (image && excnt != bpf_prog->aux->num_exentries) {
1700                 pr_err("extable is not populated\n");
1701                 return -EFAULT;
1702         }
1703         return proglen;
1704 }
1705
1706 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1707                       int stack_size)
1708 {
1709         int i;
1710         /* Store function arguments to stack.
1711          * For a function that accepts two pointers the sequence will be:
1712          * mov QWORD PTR [rbp-0x10],rdi
1713          * mov QWORD PTR [rbp-0x8],rsi
1714          */
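        /* The first five arguments arrive in rdi, rsi, rdx, rcx and r8, which
         * is exactly what BPF_REG_1..BPF_REG_5 map to; the sixth arrives in r9,
         * which has no BPF register alias, hence X86_REG_R9 for i == 5.
         */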
1715         for (i = 0; i < min(nr_args, 6); i++)
1716                 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1717                          BPF_REG_FP,
1718                          i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1719                          -(stack_size - i * 8));
1720 }
1721
1722 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1723                          int stack_size)
1724 {
1725         int i;
1726
1727         /* Restore function arguments from stack.
1728          * For a function that accepts two pointers the sequence will be:
1729          * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1730          * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1731          */
1732         for (i = 0; i < min(nr_args, 6); i++)
1733                 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1734                          i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1735                          BPF_REG_FP,
1736                          -(stack_size - i * 8));
1737 }
1738
1739 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1740                            struct bpf_prog *p, int stack_size, bool mod_ret)
1741 {
1742         u8 *prog = *pprog;
1743         u8 *jmp_insn;
1744
1745         /* arg1: mov rdi, progs[i] */
1746         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1747         if (emit_call(&prog,
1748                       p->aux->sleepable ? __bpf_prog_enter_sleepable :
1749                       __bpf_prog_enter, prog))
1750                 return -EINVAL;
1751         /* remember prog start time returned by __bpf_prog_enter */
1752         emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1753
1754         /* if (__bpf_prog_enter*(prog) == 0)
1755          *      goto skip_exec_of_prog;
1756          */
1757         EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
1758         /* emit 2 nops that will be replaced with JE insn */
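        /* (a short JE is 2 bytes, opcode + rel8, hence the 2-byte placeholder) */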
1759         jmp_insn = prog;
1760         emit_nops(&prog, 2);
1761
1762         /* arg1: lea rdi, [rbp - stack_size] */
1763         EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1764         /* arg2: progs[i]->insnsi for interpreter */
1765         if (!p->jited)
1766                 emit_mov_imm64(&prog, BPF_REG_2,
1767                                (long) p->insnsi >> 32,
1768                                (u32) (long) p->insnsi);
1769         /* call JITed bpf program or interpreter */
1770         if (emit_call(&prog, p->bpf_func, prog))
1771                 return -EINVAL;
1772
1773         /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1774          * of the previous call which is then passed on the stack to
1775          * the next BPF program.
1776          */
1777         if (mod_ret)
1778                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1779
1780         /* replace 2 nops with JE insn, since jmp target is known */
1781         jmp_insn[0] = X86_JE;
1782         jmp_insn[1] = prog - jmp_insn - 2;
1783
1784         /* arg1: mov rdi, progs[i] */
1785         emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1786         /* arg2: mov rsi, rbx <- start time in nsec */
1787         emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1788         if (emit_call(&prog,
1789                       p->aux->sleepable ? __bpf_prog_exit_sleepable :
1790                       __bpf_prog_exit, prog))
1791                 return -EINVAL;
1792
1793         *pprog = prog;
1794         return 0;
1795 }
1796
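/* Pad with nops up to the next 'align'-byte boundary (PTR_ALIGN assumes a
 * power-of-two align).
 */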
1797 static void emit_align(u8 **pprog, u32 align)
1798 {
1799         u8 *target, *prog = *pprog;
1800
1801         target = PTR_ALIGN(prog, align);
1802         if (target != prog)
1803                 emit_nops(&prog, target - prog);
1804
1805         *pprog = prog;
1806 }
1807
1808 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1809 {
1810         u8 *prog = *pprog;
1811         s64 offset;
1812
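        /* rel32 is measured from the end of the 6-byte insn: 2 opcode bytes + 4 displacement bytes */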
1813         offset = func - (ip + 2 + 4);
1814         if (!is_simm32(offset)) {
1815                 pr_err("Target %p is out of range\n", func);
1816                 return -EINVAL;
1817         }
1818         EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1819         *pprog = prog;
1820         return 0;
1821 }
1822
1823 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1824                       struct bpf_tramp_progs *tp, int stack_size)
1825 {
1826         int i;
1827         u8 *prog = *pprog;
1828
1829         for (i = 0; i < tp->nr_progs; i++) {
1830                 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1831                         return -EINVAL;
1832         }
1833         *pprog = prog;
1834         return 0;
1835 }
1836
1837 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1838                               struct bpf_tramp_progs *tp, int stack_size,
1839                               u8 **branches)
1840 {
1841         u8 *prog = *pprog;
1842         int i;
1843
1844         /* The first fmod_ret program will receive a garbage return value.
1845          * Set this to 0 to avoid confusing the program.
1846          */
1847         emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1848         emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1849         for (i = 0; i < tp->nr_progs; i++) {
1850                 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1851                         return -EINVAL;
1852
1853                 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1854                  * if (*(u64 *)(rbp - 8) !=  0)
1855                  *      goto do_fexit;
1856                  */
1857                 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1858                 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1859
1860                 /* Save the location of the branch and generate 6 nops
1861                  * (4 bytes for an offset and 2 bytes for the jump). These nops
1862                  * are replaced with a conditional jump once do_fexit (i.e. the
1863                  * start of the fexit invocation) is finalized.
1864                  */
1865                 branches[i] = prog;
1866                 emit_nops(&prog, 4 + 2);
1867         }
1868
1869         *pprog = prog;
1870         return 0;
1871 }
1872
1873 /* Example:
1874  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1875  * its 'struct btf_func_model' will have nr_args=2.
1876  * The assembly code when eth_type_trans is executing after the trampoline:
1877  *
1878  * push rbp
1879  * mov rbp, rsp
1880  * sub rsp, 16                     // space for skb and dev
1881  * push rbx                        // temp regs to pass start time
1882  * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
1883  * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
1884  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1885  * mov rbx, rax                    // remember start time if bpf stats are enabled
1886  * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
1887  * call addr_of_jited_FENTRY_prog
1888  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1889  * mov rsi, rbx                    // prog start time
1890  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1891  * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
1892  * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
1893  * pop rbx
1894  * leave
1895  * ret
1896  *
1897  * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1898  * replaced with 'call generated_bpf_trampoline'. When it returns
1899  * eth_type_trans will continue executing with original skb and dev pointers.
1900  *
1901  * The assembly code when eth_type_trans is called from the trampoline:
1902  *
1903  * push rbp
1904  * mov rbp, rsp
1905  * sub rsp, 24                     // space for skb, dev, return value
1906  * push rbx                        // temp regs to pass start time
1907  * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
1908  * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
1909  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1910  * mov rbx, rax                    // remember start time if bpf stats are enabled
1911  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1912  * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
1913  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1914  * mov rsi, rbx                    // prog start time
1915  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1916  * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
1917  * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
1918  * call eth_type_trans+5           // execute body of eth_type_trans
1919  * mov qword ptr [rbp - 8], rax    // save return value
1920  * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
1921  * mov rbx, rax                    // remember start time if bpf stats are enabled
1922  * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
1923  * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
1924  * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
1925  * mov rsi, rbx                    // prog start time
1926  * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
1927  * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
1928  * pop rbx
1929  * leave
1930  * add rsp, 8                      // skip eth_type_trans's frame
1931  * ret                             // return to its caller
1932  */
1933 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1934                                 const struct btf_func_model *m, u32 flags,
1935                                 struct bpf_tramp_progs *tprogs,
1936                                 void *orig_call)
1937 {
1938         int ret, i, nr_args = m->nr_args;
1939         int stack_size = nr_args * 8;
1940         struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1941         struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1942         struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1943         u8 **branches = NULL;
1944         u8 *prog;
1945
1946         /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1947         if (nr_args > 6)
1948                 return -ENOTSUPP;
1949
1950         if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1951             (flags & BPF_TRAMP_F_SKIP_FRAME))
1952                 return -EINVAL;
1953
1954         if (flags & BPF_TRAMP_F_CALL_ORIG)
1955                 stack_size += 8; /* room for return value of orig_call */
1956
1957         if (flags & BPF_TRAMP_F_IP_ARG)
1958                 stack_size += 8; /* room for IP address argument */
1959
1960         if (flags & BPF_TRAMP_F_SKIP_FRAME)
1961                 /* skip patched call instruction and point orig_call to actual
1962                  * body of the kernel function.
1963                  */
1964                 orig_call += X86_PATCH_SIZE;
1965
1966         prog = image;
1967
1968         EMIT1(0x55);             /* push rbp */
1969         EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1970         EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1971         EMIT1(0x53);             /* push rbx */
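        /* Rough sketch of the frame laid out above (offsets relative to rbp):
         *   [rbp + 8]                      return address into the traced function
         *   [rbp + 0]                      saved rbp
         *   [rbp - stack_size .. rbp - 8]  save area for args / IP / return value
         *   below that                     saved rbx
         */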
1972
1973         if (flags & BPF_TRAMP_F_IP_ARG) {
1974                 /* Store IP address of the traced function:
1975                  * mov rax, QWORD PTR [rbp + 8]
1976                  * sub rax, X86_PATCH_SIZE
1977                  * mov QWORD PTR [rbp - stack_size], rax
1978                  */
1979                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
1980                 EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
1981                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);
1982
1983                 /* Continue with stack_size for regs storage, stack will
1984                  * be correctly restored with 'leave' instruction.
1985                  */
1986                 stack_size -= 8;
1987         }
1988
1989         save_regs(m, &prog, nr_args, stack_size);
1990
1991         if (flags & BPF_TRAMP_F_CALL_ORIG) {
1992                 /* arg1: mov rdi, im */
1993                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
1994                 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
1995                         ret = -EINVAL;
1996                         goto cleanup;
1997                 }
1998         }
1999
2000         if (fentry->nr_progs)
2001                 if (invoke_bpf(m, &prog, fentry, stack_size))
2002                         return -EINVAL;
2003
2004         if (fmod_ret->nr_progs) {
2005                 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2006                                    GFP_KERNEL);
2007                 if (!branches)
2008                         return -ENOMEM;
2009
2010                 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
2011                                        branches)) {
2012                         ret = -EINVAL;
2013                         goto cleanup;
2014                 }
2015         }
2016
2017         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2018                 restore_regs(m, &prog, nr_args, stack_size);
2019
2020                 /* call original function */
2021                 if (emit_call(&prog, orig_call, prog)) {
2022                         ret = -EINVAL;
2023                         goto cleanup;
2024                 }
2025                 /* remember return value on the stack for bpf prog to access */
2026                 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2027                 im->ip_after_call = prog;
2028                 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2029                 prog += X86_PATCH_SIZE;
2030         }
2031
2032         if (fmod_ret->nr_progs) {
2033                 /* From Intel 64 and IA-32 Architectures Optimization
2034                  * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2035                  * Coding Rule 11: All branch targets should be 16-byte
2036                  * aligned.
2037                  */
2038                 emit_align(&prog, 16);
2039                 /* Update the branches saved in invoke_bpf_mod_ret with the
2040                  * aligned address of do_fexit.
2041                  */
2042                 for (i = 0; i < fmod_ret->nr_progs; i++)
2043                         emit_cond_near_jump(&branches[i], prog, branches[i],
2044                                             X86_JNE);
2045         }
2046
2047         if (fexit->nr_progs)
2048                 if (invoke_bpf(m, &prog, fexit, stack_size)) {
2049                         ret = -EINVAL;
2050                         goto cleanup;
2051                 }
2052
2053         if (flags & BPF_TRAMP_F_RESTORE_REGS)
2054                 restore_regs(m, &prog, nr_args, stack_size);
2055
2056         /* This needs to be done regardless. If there were fmod_ret programs,
2057          * the return value is only updated on the stack and still needs to be
2058          * restored to R0.
2059          */
2060         if (flags & BPF_TRAMP_F_CALL_ORIG) {
2061                 im->ip_epilogue = prog;
2062                 /* arg1: mov rdi, im */
2063                 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2064                 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2065                         ret = -EINVAL;
2066                         goto cleanup;
2067                 }
2068                 /* restore original return value back into RAX */
2069                 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2070         }
2071
2072         EMIT1(0x5B); /* pop rbx */
2073         EMIT1(0xC9); /* leave */
2074         if (flags & BPF_TRAMP_F_SKIP_FRAME)
2075                 /* skip our return address and return to parent */
2076                 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2077         EMIT1(0xC3); /* ret */
2078         /* Make sure the trampoline generation logic doesn't overflow */
2079         if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2080                 ret = -EFAULT;
2081                 goto cleanup;
2082         }
2083         ret = prog - (u8 *)image;
2084
2085 cleanup:
2086         kfree(branches);
2087         return ret;
2088 }
2089
2090 static int emit_fallback_jump(u8 **pprog)
2091 {
2092         u8 *prog = *pprog;
2093         int err = 0;
2094
2095 #ifdef CONFIG_RETPOLINE
2096         /* Note that this assumes the compiler uses external
2097          * thunks for indirect calls. Both clang and GCC use the same
2098          * naming convention for external thunks.
2099          */
2100         err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
2101 #else
2102         EMIT2(0xFF, 0xE2);      /* jmp rdx */
2103 #endif
2104         *pprog = prog;
2105         return err;
2106 }
2107
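/* Emit a binary search over the sorted 'progs' addresses. The target address
 * is expected in rdx (BPF_REG_3): leaf nodes compare and 'je' straight to the
 * matching program, inner nodes compare against the pivot and 'jg' over the
 * lower half to the upper half. Addresses that match nothing fall through to
 * emit_fallback_jump().
 */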
2108 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2109 {
2110         u8 *jg_reloc, *prog = *pprog;
2111         int pivot, err, jg_bytes = 1;
2112         s64 jg_offset;
2113
2114         if (a == b) {
2115                 /* Leaf node of recursion, i.e. not a range of indices
2116                  * anymore.
2117                  */
2118                 EMIT1(add_1mod(0x48, BPF_REG_3));       /* cmp rdx,func */
2119                 if (!is_simm32(progs[a]))
2120                         return -1;
2121                 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2122                             progs[a]);
2123                 err = emit_cond_near_jump(&prog,        /* je func */
2124                                           (void *)progs[a], prog,
2125                                           X86_JE);
2126                 if (err)
2127                         return err;
2128
2129                 err = emit_fallback_jump(&prog);        /* jmp thunk/indirect */
2130                 if (err)
2131                         return err;
2132
2133                 *pprog = prog;
2134                 return 0;
2135         }
2136
2137         /* Not a leaf node, so we pivot, and recursively descend into
2138          * the lower and upper ranges.
2139          */
2140         pivot = (b - a) / 2;
2141         EMIT1(add_1mod(0x48, BPF_REG_3));               /* cmp rdx,func */
2142         if (!is_simm32(progs[a + pivot]))
2143                 return -1;
2144         EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2145
2146         if (pivot > 2) {                                /* jg upper_part */
2147                 /* Require near jump. */
2148                 jg_bytes = 4;
2149                 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2150         } else {
2151                 EMIT2(X86_JG, 0);
2152         }
2153         jg_reloc = prog;
2154
2155         err = emit_bpf_dispatcher(&prog, a, a + pivot,  /* emit lower_part */
2156                                   progs);
2157         if (err)
2158                 return err;
2159
2160         /* From Intel 64 and IA-32 Architectures Optimization
2161          * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2162          * Coding Rule 11: All branch targets should be 16-byte
2163          * aligned.
2164          */
2165         emit_align(&prog, 16);
2166         jg_offset = prog - jg_reloc;
2167         emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2168
2169         err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2170                                   b, progs);
2171         if (err)
2172                 return err;
2173
2174         *pprog = prog;
2175         return 0;
2176 }
2177
2178 static int cmp_ips(const void *a, const void *b)
2179 {
2180         const s64 *ipa = a;
2181         const s64 *ipb = b;
2182
2183         if (*ipa > *ipb)
2184                 return 1;
2185         if (*ipa < *ipb)
2186                 return -1;
2187         return 0;
2188 }
2189
2190 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2191 {
2192         u8 *prog = image;
2193
2194         sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2195         return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
2196 }
2197
2198 struct x64_jit_data {
2199         struct bpf_binary_header *header;
2200         int *addrs;
2201         u8 *image;
2202         int proglen;
2203         struct jit_context ctx;
2204 };
2205
2206 #define MAX_PASSES 20
2207 #define PADDING_PASSES (MAX_PASSES - 5)
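/* From pass PADDING_PASSES onwards, do_jit() emits nop padding so the image
 * stops shrinking and the remaining passes (plus the final extra pass with a
 * real image) can converge.
 */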
2208
2209 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2210 {
2211         struct bpf_binary_header *header = NULL;
2212         struct bpf_prog *tmp, *orig_prog = prog;
2213         struct x64_jit_data *jit_data;
2214         int proglen, oldproglen = 0;
2215         struct jit_context ctx = {};
2216         bool tmp_blinded = false;
2217         bool extra_pass = false;
2218         bool padding = false;
2219         u8 *image = NULL;
2220         int *addrs;
2221         int pass;
2222         int i;
2223
2224         if (!prog->jit_requested)
2225                 return orig_prog;
2226
2227         tmp = bpf_jit_blind_constants(prog);
2228         /*
2229          * If blinding was requested and we failed during blinding,
2230          * we must fall back to the interpreter.
2231          */
2232         if (IS_ERR(tmp))
2233                 return orig_prog;
2234         if (tmp != prog) {
2235                 tmp_blinded = true;
2236                 prog = tmp;
2237         }
2238
2239         jit_data = prog->aux->jit_data;
2240         if (!jit_data) {
2241                 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2242                 if (!jit_data) {
2243                         prog = orig_prog;
2244                         goto out;
2245                 }
2246                 prog->aux->jit_data = jit_data;
2247         }
2248         addrs = jit_data->addrs;
2249         if (addrs) {
2250                 ctx = jit_data->ctx;
2251                 oldproglen = jit_data->proglen;
2252                 image = jit_data->image;
2253                 header = jit_data->header;
2254                 extra_pass = true;
2255                 padding = true;
2256                 goto skip_init_addrs;
2257         }
2258         addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2259         if (!addrs) {
2260                 prog = orig_prog;
2261                 goto out_addrs;
2262         }
2263
2264         /*
2265          * Before the first pass, make a rough estimate of addrs[]:
2266          * each BPF instruction is translated to less than 64 bytes.
2267          */
2268         for (proglen = 0, i = 0; i <= prog->len; i++) {
2269                 proglen += 64;
2270                 addrs[i] = proglen;
2271         }
2272         ctx.cleanup_addr = proglen;
2273 skip_init_addrs:
2274
2275         /*
2276          * JITed image shrinks with every pass and the loop iterates
2277          * until the image stops shrinking. Very large BPF programs
2278          * may converge on the last pass. In such case do one more
2279          * may converge on the last pass. In such a case, do one more
2280          */
2281         for (pass = 0; pass < MAX_PASSES || image; pass++) {
2282                 if (!padding && pass >= PADDING_PASSES)
2283                         padding = true;
2284                 proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2285                 if (proglen <= 0) {
2286 out_image:
2287                         image = NULL;
2288                         if (header)
2289                                 bpf_jit_binary_free(header);
2290                         prog = orig_prog;
2291                         goto out_addrs;
2292                 }
2293                 if (image) {
2294                         if (proglen != oldproglen) {
2295                                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2296                                        proglen, oldproglen);
2297                                 goto out_image;
2298                         }
2299                         break;
2300                 }
2301                 if (proglen == oldproglen) {
2302                         /*
2303                          * The number of entries in extable is the number of BPF_LDX
2304                          * insns that access kernel memory via "pointer to BTF type".
2305                          * The verifier changed their opcode from LDX|MEM|size
2306                          * to LDX|PROBE_MEM|size to make JITing easier.
2307                          */
2308                         u32 align = __alignof__(struct exception_table_entry);
2309                         u32 extable_size = prog->aux->num_exentries *
2310                                 sizeof(struct exception_table_entry);
2311
2312                         /* allocate module memory for x86 insns and extable */
2313                         header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2314                                                       &image, align, jit_fill_hole);
2315                         if (!header) {
2316                                 prog = orig_prog;
2317                                 goto out_addrs;
2318                         }
2319                         prog->aux->extable = (void *) image + roundup(proglen, align);
2320                 }
2321                 oldproglen = proglen;
2322                 cond_resched();
2323         }
2324
2325         if (bpf_jit_enable > 1)
2326                 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2327
2328         if (image) {
2329                 if (!prog->is_func || extra_pass) {
2330                         bpf_tail_call_direct_fixup(prog);
2331                         bpf_jit_binary_lock_ro(header);
2332                 } else {
2333                         jit_data->addrs = addrs;
2334                         jit_data->ctx = ctx;
2335                         jit_data->proglen = proglen;
2336                         jit_data->image = image;
2337                         jit_data->header = header;
2338                 }
2339                 prog->bpf_func = (void *)image;
2340                 prog->jited = 1;
2341                 prog->jited_len = proglen;
2342         } else {
2343                 prog = orig_prog;
2344         }
2345
2346         if (!image || !prog->is_func || extra_pass) {
2347                 if (image)
2348                         bpf_prog_fill_jited_linfo(prog, addrs + 1);
2349 out_addrs:
2350                 kvfree(addrs);
2351                 kfree(jit_data);
2352                 prog->aux->jit_data = NULL;
2353         }
2354 out:
2355         if (tmp_blinded)
2356                 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2357                                            tmp : orig_prog);
2358         return prog;
2359 }
2360
2361 bool bpf_jit_supports_kfunc_call(void)
2362 {
2363         return true;
2364 }