/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
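
/*
 * ctx->seen tracks which non-volatile GPRs are used, as a bitmask indexed
 * from the most significant bit: if a BPF register maps to, say, GPR r27,
 * its use is recorded in bit (31 - 27) = 4. Indexing by the ppc register
 * number (b2p[i]) lets the prologue and epilogue query the mask for each
 * register they may need to save or restore.
 */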

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 6*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}
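
/*
 * Example: with no stack frame of our own, bpf_jit_stack_local() returns
 * -(BPF_PPC_STACK_SAVE + 16), i.e. the local_tmp_var slot in the redzone
 * diagram above; with a frame, it returns a positive offset from r1, just
 * past the minimal frame and the BPF stack area.
 */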

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about an unknown register");
	BUG();
}
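
/*
 * Example: for r31 (the highest non-volatile GPR), the formula above gives
 * -8 when there is no stack frame (the top slot of the nv gpr save area in
 * the redzone), or (BPF_PPC_STACKFRAME + stack_size) - 8 when a frame was
 * set up.
 */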

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
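
/*
 * The two instructions above (li + std, or the two nops) are exactly
 * BPF_TAILCALL_PROLOGUE_SIZE bytes: a tail call enters the program at
 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE so that tail_call_cnt is carried
 * over rather than re-initialized.
 */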

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);
	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();
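
	/*
	 * Example: an address that happens to fit in 32 bits may need only
	 * two instructions from PPC_LI64(), so the loop above then emits
	 * three nops after it; a full 64-bit address uses all five slots
	 * and gets no padding.
	 */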

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
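	/*
	 * The rlwinm below clamps the index to 32 bits: rotating by 0 with a
	 * full 0-31 mask keeps the low word and clears the upper half, so
	 * the unsigned compare against max_entries sees a proper u32 index.
	 */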
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
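
		/*
		 * Note: addi takes a sign-extended 16-bit immediate, hence
		 * the [-32768, 32768) check above; e.g. dst += 0x12345 must
		 * take the PPC_LI32 + add path instead.
		 */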
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg, b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
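
		/*
		 * The DIV/MOD cases below compute the remainder as
		 * dst - (dst / src) * src, since the base ISA has no integer
		 * remainder instruction (modsw/modud and friends only
		 * arrived with ISA v3.0).
		 */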
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				/* div by 1 is a no-op, mod by 1 yields 0 */
				if (BPF_OP(code) == BPF_MOD)
					PPC_LI(dst_reg, 0);
				goto bpf_alu32_trunc;
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg, b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
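
		/*
		 * Example: imm = 0x12345678 splits as IMM_H = 0x1234 and
		 * IMM_L = 0x5678, so the non-negative path above emits
		 * "ori dst,dst,0x5678; oris dst,dst,0x1234". A negative imm
		 * on ALU64 cannot be built this way, because ori/oris never
		 * set the sign-extended upper 32 bits.
		 */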
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			goto bpf_alu32_trunc;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;
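
		/*
		 * rlwinm dst,dst,0,0,31 keeps mask bits 0-31 of the low word
		 * and clears the upper 32 bits, giving the zero extension
		 * that 32-bit BPF_ALU results require.
		 */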

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
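
				/*
				 * Worked example: dst = 0x0000AABB. The
				 * rlwinm leaves 0x0000BB00 in the temp; the
				 * rlwimi inserts 0xAA into bits 24-31,
				 * giving 0x0000BBAA, i.e. the halfword
				 * byteswapped.
				 */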
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
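
				/*
				 * Worked example: dst = 0x11223344. The
				 * rotate-left-8 yields 0x22334411, with 0x33
				 * and 0x11 already placed; inserting byte 1
				 * (0x44) and byte 3 (0x22) from the
				 * rotate-left-24 value 0x44112233 gives
				 * 0x44332211.
				 */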
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
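
			/*
			 * stwcx. clears CR0[EQ] when it loses the
			 * reservation, so the conditional branch above loops
			 * back to tmp_idx (the lwarx) and retries until the
			 * store succeeds.
			 */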
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
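
		/*
		 * Example: for the 64-bit constant 0x1122334455667788, the
		 * two BPF instructions carry insn[i].imm = 0x55667788 (low
		 * word) and insn[i+1].imm = 0x11223344 (high word). Both
		 * slots of addrs[] must still map to valid offsets, hence
		 * the ++i adjustment above.
		 */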

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			/* bpf function call */
			if (insn[i].src_reg == BPF_PSEUDO_CALL) {
				if (!extra_pass)
					func = NULL;
				else if (fp->aux->func && off < fp->aux->func_cnt)
					/* use the subprog id from the off
					 * field to lookup the callee address
					 */
					func = (u8 *) fp->aux->func[off]->bpf_func;
				else
					return -EINVAL;
			} else {
				/* kernel helper call */
				func = (u8 *) __bpf_call_base + imm;
			}

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fall through to cond_branch */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;
	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
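
	/*
	 * Every PPC instruction is 4 bytes, so cgctx.idx * 4 is the code
	 * size; alloclen additionally reserves room for the ELF ABI v1
	 * function descriptor emitted below (FUNCTION_DESCR_SIZE is 0 on
	 * ABI v2).
	 */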

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output code_base rather than image, since
		 * the opcodes live in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif
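
	/*
	 * This mirrors what bpf_jit_emit_func_call() expects of an ELF ABI
	 * v1 descriptor: the entry point at offset 0 and the TOC pointer at
	 * offset 8.
	 */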

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}