2 * bpf_jit_comp64.c: eBPF JIT compiler
4 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
7 * Based on the powerpc classic BPF JIT compiler by Matt Evans
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; version 2
14 #include <linux/moduleloader.h>
15 #include <asm/cacheflush.h>
16 #include <linux/netdevice.h>
17 #include <linux/filter.h>
18 #include <linux/if_vlan.h>
19 #include <asm/kprobes.h>
20 #include <linux/bpf.h>
22 #include "bpf_jit64.h"
24 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
26 memset32(area, BREAKPOINT_INSTRUCTION, size/4);
29 static inline void bpf_flush_icache(void *start, void *end)
32 flush_icache_range((unsigned long)start, (unsigned long)end);
35 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
37 return (ctx->seen & (1 << (31 - b2p[i])));
40 static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
42 ctx->seen |= (1 << (31 - b2p[i]));
45 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
48 * We only need a stack frame if:
49 * - we call other functions (kernel helpers), or
50 * - the bpf program uses its stack area
51 * The latter condition is deduced from the usage of BPF_REG_FP
53 return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
56 /*
57  * When not setting up our own stackframe, the redzone usage is:
58  *
59  *   [  prev sp           ] <-------------
60  *   [        ...         ]              |
61  * sp (r1) ---> [  stack pointer  ] --------------
62  *   [  nv gpr save area  ] 8*8
63  *   [  tail_call_cnt     ] 8
64  *   [  local_tmp_var     ] 8
65  *   [  unused red zone   ] 208 bytes protected
66  */
67 static int bpf_jit_stack_local(struct codegen_context *ctx)
69 if (bpf_has_stack_frame(ctx))
70 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
72 return -(BPF_PPC_STACK_SAVE + 16);
75 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
77 return bpf_jit_stack_local(ctx) + 8;
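/*
 * Note: bpf_jit_stack_local() and bpf_jit_stack_tailcallcnt() name the two
 * 8-byte slots shown in the diagram above (local_tmp_var and tail_call_cnt).
 * With our own stack frame they live just above STACK_FRAME_MIN_SIZE; without
 * one they sit in the red zone, below the nv gpr save area. The local slot is
 * used as scratch space, e.g. for the 64-bit byte swap and for saving the skb
 * pointer across helper calls.
 */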
80 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
82 if (reg >= BPF_PPC_NVR_MIN && reg < 32)
83 return (bpf_has_stack_frame(ctx) ?
84 (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
85 - (8 * (32 - reg));
87 pr_err("BPF JIT is asking about unknown registers");
91 static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
94 * Load skb->len and skb->data_len
97 PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
98 PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
99 /* header_len = len - data_len */
100 PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);
102 /* skb->data pointer */
103 PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
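/*
 * Example: for a fully linear skb, data_len is 0, so SKB_HLEN_REG ends up
 * holding skb->len, i.e. the number of bytes directly addressable through
 * the cached skb->data pointer.
 */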
106 static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
111 * Initialize tail_call_cnt if we do tail calls.
112 * Otherwise, put in NOPs so that it can be skipped when we are
113 * invoked through a tail call.
115 if (ctx->seen & SEEN_TAILCALL) {
116 PPC_LI(b2p[TMP_REG_1], 0);
117 /* this goes in the redzone */
118 PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
124 #define BPF_TAILCALL_PROLOGUE_SIZE 8
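/*
 * 8 bytes == the two instructions emitted above (li + std when tail calls
 * are used, NOPs otherwise). A tail call enters the target program at
 * bpf_func + BPF_TAILCALL_PROLOGUE_SIZE, skipping this re-initialization so
 * that the caller's tail_call_cnt is preserved across the chain.
 */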
126 if (bpf_has_stack_frame(ctx)) {
128 * We need a stack frame, but we don't necessarily need to
129 * save/restore LR unless we call other functions
131 if (ctx->seen & SEEN_FUNC) {
132 EMIT(PPC_INST_MFLR | __PPC_RT(R0));
133 PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
136 PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
140 * Back up non-volatile regs -- BPF registers 6-10
141 * If we haven't created our own stack frame, we save these
142 * in the protected zone below the previous stack frame
144 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
145 if (bpf_is_seen_register(ctx, i))
146 PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
149 * Save additional non-volatile regs if we cache skb
150 * Also, setup skb data
152 if (ctx->seen & SEEN_SKB) {
153 PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
154 bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
155 PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
156 bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
157 bpf_jit_emit_skb_loads(image, ctx);
160 /* Setup frame pointer to point to the bpf stack area */
161 if (bpf_is_seen_register(ctx, BPF_REG_FP))
162 PPC_ADDI(b2p[BPF_REG_FP], 1,
163 STACK_FRAME_MIN_SIZE + ctx->stack_size);
166 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
171 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
172 if (bpf_is_seen_register(ctx, i))
173 PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
175 /* Restore non-volatile registers used for skb cache */
176 if (ctx->seen & SEEN_SKB) {
177 PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
178 bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
179 PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
180 bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
183 /* Tear down our stack frame */
184 if (bpf_has_stack_frame(ctx)) {
185 PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
186 if (ctx->seen & SEEN_FUNC) {
187 PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
193 static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
195 bpf_jit_emit_common_epilogue(image, ctx);
197 /* Move result to r3 */
198 PPC_MR(3, b2p[BPF_REG_0]);
203 static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
205 #ifdef PPC64_ELF_ABI_v1
206 /* func points to the function descriptor */
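/*
 * An ELF ABIv1 function pointer refers to a function descriptor: the entry
 * address at offset 0, the callee's TOC pointer at offset 8 and an optional
 * environment pointer at offset 16. The loads below pick up the first two.
 */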
207 PPC_LI64(b2p[TMP_REG_2], func);
208 /* Load actual entry point from function descriptor */
209 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
210 /* ... and move it to LR */
211 PPC_MTLR(b2p[TMP_REG_1]);
213 * Load TOC from function descriptor at offset 8.
214 * We can clobber r2 since we get called through a
215 * function pointer (so caller will save/restore r2)
216 * and since we don't use a TOC ourself.
218 PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
219 #else
220 /* We can clobber r12 */
221 PPC_FUNC_ADDR(12, func);
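/*
 * ELF ABIv2 has no function descriptors. Putting the entry address in r12
 * follows the ABI convention that r12 holds the callee's global entry point,
 * from which the callee derives its own TOC if it needs one.
 */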
227 static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
230 * By now, the eBPF program has already setup parameters in r3, r4 and r5
231 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
232 * r4/BPF_REG_2 - pointer to bpf_array
233 * r5/BPF_REG_3 - index in bpf_array
235 int b2p_bpf_array = b2p[BPF_REG_2];
236 int b2p_index = b2p[BPF_REG_3];
239 * if (index >= array->map.max_entries)
242 PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
243 PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
244 PPC_BCC(COND_GE, out);
247 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
250 PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
251 PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
252 PPC_BCC(COND_GT, out);
257 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
258 PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
260 /* prog = array->ptrs[index]; */
261 PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
262 PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
263 PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
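/* Bail out of the tail call if that slot is empty (prog == NULL) */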
269 PPC_CMPLDI(b2p[TMP_REG_1], 0);
270 PPC_BCC(COND_EQ, out);
272 /* goto *(prog->bpf_func + prologue_size); */
273 PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
274 #ifdef PPC64_ELF_ABI_v1
275 /* skip past the function descriptor */
276 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
277 FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
279 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
281 PPC_MTCTR(b2p[TMP_REG_1]);
283 /* tear down stack, restore NVRs, ... */
284 bpf_jit_emit_common_epilogue(image, ctx);
290 /* Assemble the body code between the prologue & epilogue */
291 static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
292 struct codegen_context *ctx,
295 const struct bpf_insn *insn = fp->insnsi;
299 /* Start of epilogue code - will only be valid 2nd pass onwards */
300 u32 exit_addr = addrs[flen];
302 for (i = 0; i < flen; i++) {
303 u32 code = insn[i].code;
304 u32 dst_reg = b2p[insn[i].dst_reg];
305 u32 src_reg = b2p[insn[i].src_reg];
306 s16 off = insn[i].off;
307 s32 imm = insn[i].imm;
313 * addrs[] maps a BPF bytecode address into a real offset from
314 * the start of the body code.
316 addrs[i] = ctx->idx * 4;
319 * As an optimization, we note down which non-volatile registers
320 * are used so that we can only save/restore those in our
321 * prologue and epilogue. We do this here regardless of whether
322 * the actual BPF instruction uses src/dst registers or not
323 * (for instance, BPF_CALL does not use them). The expectation
324 * is that those instructions will have src_reg/dst_reg set to
325 * 0. Even otherwise, we just lose some prologue/epilogue
326 * optimization but everything else should work without
329 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
330 bpf_set_seen_register(ctx, insn[i].dst_reg);
331 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
332 bpf_set_seen_register(ctx, insn[i].src_reg);
336 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
338 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
339 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
340 PPC_ADD(dst_reg, dst_reg, src_reg);
341 goto bpf_alu32_trunc;
342 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
343 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
344 PPC_SUB(dst_reg, dst_reg, src_reg);
345 goto bpf_alu32_trunc;
346 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
347 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
348 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
349 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
350 if (BPF_OP(code) == BPF_SUB)
353 if (imm >= -32768 && imm < 32768)
354 PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
356 PPC_LI32(b2p[TMP_REG_1], imm);
357 PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
360 goto bpf_alu32_trunc;
361 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
362 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
363 if (BPF_CLASS(code) == BPF_ALU)
364 PPC_MULW(dst_reg, dst_reg, src_reg);
366 PPC_MULD(dst_reg, dst_reg, src_reg);
367 goto bpf_alu32_trunc;
368 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
369 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
370 if (imm >= -32768 && imm < 32768)
371 PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
373 PPC_LI32(b2p[TMP_REG_1], imm);
374 if (BPF_CLASS(code) == BPF_ALU)
375 PPC_MULW(dst_reg, dst_reg,
378 PPC_MULD(dst_reg, dst_reg,
381 goto bpf_alu32_trunc;
382 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
383 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
384 if (BPF_OP(code) == BPF_MOD) {
385 PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
386 PPC_MULW(b2p[TMP_REG_1], src_reg,
388 PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
390 PPC_DIVWU(dst_reg, dst_reg, src_reg);
391 goto bpf_alu32_trunc;
392 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
393 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
394 if (BPF_OP(code) == BPF_MOD) {
395 PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
396 PPC_MULD(b2p[TMP_REG_1], src_reg,
398 PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
400 PPC_DIVD(dst_reg, dst_reg, src_reg);
402 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
403 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
404 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
405 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
409 goto bpf_alu32_trunc;
411 PPC_LI32(b2p[TMP_REG_1], imm);
412 switch (BPF_CLASS(code)) {
414 if (BPF_OP(code) == BPF_MOD) {
415 PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
417 PPC_MULW(b2p[TMP_REG_1],
420 PPC_SUB(dst_reg, dst_reg,
423 PPC_DIVWU(dst_reg, dst_reg,
427 if (BPF_OP(code) == BPF_MOD) {
428 PPC_DIVD(b2p[TMP_REG_2], dst_reg,
430 PPC_MULD(b2p[TMP_REG_1],
433 PPC_SUB(dst_reg, dst_reg,
436 PPC_DIVD(dst_reg, dst_reg,
440 goto bpf_alu32_trunc;
441 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
442 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
443 PPC_NEG(dst_reg, dst_reg);
444 goto bpf_alu32_trunc;
447 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
449 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
450 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
451 PPC_AND(dst_reg, dst_reg, src_reg);
452 goto bpf_alu32_trunc;
453 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
454 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
456 PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
459 PPC_LI32(b2p[TMP_REG_1], imm);
460 PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
462 goto bpf_alu32_trunc;
463 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
464 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
465 PPC_OR(dst_reg, dst_reg, src_reg);
466 goto bpf_alu32_trunc;
467 case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
468 case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
469 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
471 PPC_LI32(b2p[TMP_REG_1], imm);
472 PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
475 PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
477 PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
479 goto bpf_alu32_trunc;
480 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
481 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
482 PPC_XOR(dst_reg, dst_reg, src_reg);
483 goto bpf_alu32_trunc;
484 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
485 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
486 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
488 PPC_LI32(b2p[TMP_REG_1], imm);
489 PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
492 PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
494 PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
496 goto bpf_alu32_trunc;
497 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
498 /* slw clears top 32 bits */
499 PPC_SLW(dst_reg, dst_reg, src_reg);
501 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
502 PPC_SLD(dst_reg, dst_reg, src_reg);
504 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
505 /* with imm 0, we still need to clear top 32 bits */
506 PPC_SLWI(dst_reg, dst_reg, imm);
508 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
510 PPC_SLDI(dst_reg, dst_reg, imm);
512 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
513 PPC_SRW(dst_reg, dst_reg, src_reg);
515 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
516 PPC_SRD(dst_reg, dst_reg, src_reg);
518 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
519 PPC_SRWI(dst_reg, dst_reg, imm);
521 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
523 PPC_SRDI(dst_reg, dst_reg, imm);
525 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
526 PPC_SRAD(dst_reg, dst_reg, src_reg);
528 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
530 PPC_SRADI(dst_reg, dst_reg, imm);
536 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
537 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
538 PPC_MR(dst_reg, src_reg);
539 goto bpf_alu32_trunc;
540 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
541 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
542 PPC_LI32(dst_reg, imm);
544 goto bpf_alu32_trunc;
548 /* Truncate to 32-bits */
549 if (BPF_CLASS(code) == BPF_ALU)
550 PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
556 case BPF_ALU | BPF_END | BPF_FROM_LE:
557 case BPF_ALU | BPF_END | BPF_FROM_BE:
558 #ifdef __BIG_ENDIAN__
559 if (BPF_SRC(code) == BPF_FROM_BE)
561 #else /* !__BIG_ENDIAN__ */
562 if (BPF_SRC(code) == BPF_FROM_LE)
567 /* Rotate 8 bits left & mask with 0x0000ff00 */
568 PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
569 /* Rotate 8 bits right & insert LSB to reg */
570 PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
571 /* Move result back to dst_reg */
572 PPC_MR(dst_reg, b2p[TMP_REG_1]);
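/*
 * Worked example (16-bit swap): for dst = 0x0000aabb, the rlwinm above
 * yields 0x0000bb00 and the rlwimi inserts 0xaa into the low byte, giving
 * 0x0000bbaa -- the byte-swapped value, zero-extended.
 */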
576 * Rotate word left by 8 bits:
577 * 2 bytes are already in their final position
578 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
580 PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
581 /* Rotate 24 bits and insert byte 1 */
582 PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
583 /* Rotate 24 bits and insert byte 3 */
584 PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
585 PPC_MR(dst_reg, b2p[TMP_REG_1]);
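/*
 * Worked example (32-bit swap): for dst = 0x11223344, the rotate-left-by-8
 * gives 0x22334411, inserting byte 1 gives 0x44334411, and inserting byte 3
 * gives the final byte-swapped value 0x44332211.
 */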
589 * Way easier and faster(?) to store the value
590 * on the stack and then use ldbrx
592 * ctx->seen will be reliable in pass2, but
593 * the instructions generated will remain the
594 * same across all passes
596 PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
597 PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
598 PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
606 /* zero-extend 16 bits into 64 bits */
607 PPC_RLDICL(dst_reg, dst_reg, 0, 48);
610 /* zero-extend 32 bits into 64 bits */
611 PPC_RLDICL(dst_reg, dst_reg, 0, 32);
622 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
623 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
624 if (BPF_CLASS(code) == BPF_ST) {
625 PPC_LI(b2p[TMP_REG_1], imm);
626 src_reg = b2p[TMP_REG_1];
628 PPC_STB(src_reg, dst_reg, off);
630 case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
631 case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
632 if (BPF_CLASS(code) == BPF_ST) {
633 PPC_LI(b2p[TMP_REG_1], imm);
634 src_reg = b2p[TMP_REG_1];
636 PPC_STH(src_reg, dst_reg, off);
638 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
639 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
640 if (BPF_CLASS(code) == BPF_ST) {
641 PPC_LI32(b2p[TMP_REG_1], imm);
642 src_reg = b2p[TMP_REG_1];
644 PPC_STW(src_reg, dst_reg, off);
646 case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
647 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
648 if (BPF_CLASS(code) == BPF_ST) {
649 PPC_LI32(b2p[TMP_REG_1], imm);
650 src_reg = b2p[TMP_REG_1];
652 PPC_STD(src_reg, dst_reg, off);
656 * BPF_STX XADD (atomic_add)
658 /* *(u32 *)(dst + off) += src */
659 case BPF_STX | BPF_XADD | BPF_W:
660 /* Get EA into TMP_REG_1 */
661 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
662 /* error if EA is not word-aligned */
663 PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
664 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
665 PPC_LI(b2p[BPF_REG_0], 0);
667 /* load value from memory into TMP_REG_2 */
668 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
669 /* add value from src_reg into this */
670 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
671 /* store result back */
672 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
673 /* we're done if this succeeded */
674 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
675 /* otherwise, let's try once more */
676 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
677 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
678 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
679 /* exit if the store was not successful */
680 PPC_LI(b2p[BPF_REG_0], 0);
681 PPC_BCC(COND_NE, exit_addr);
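/*
 * lwarx/stwcx. form a load-reserve/store-conditional pair: the store only
 * succeeds (CR0 EQ) if nothing else wrote to the reservation granule in
 * between. On a lost reservation we retry the read-modify-write once more,
 * and if that also fails we bail out through the exit path with a return
 * value of 0.
 */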
683 /* *(u64 *)(dst + off) += src */
684 case BPF_STX | BPF_XADD | BPF_DW:
685 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
686 /* error if EA is not doubleword-aligned */
687 PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
688 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
689 PPC_LI(b2p[BPF_REG_0], 0);
691 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
692 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
693 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
694 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
695 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
696 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
697 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
698 PPC_LI(b2p[BPF_REG_0], 0);
699 PPC_BCC(COND_NE, exit_addr);
705 /* dst = *(u8 *)(ul) (src + off) */
706 case BPF_LDX | BPF_MEM | BPF_B:
707 PPC_LBZ(dst_reg, src_reg, off);
709 /* dst = *(u16 *)(ul) (src + off) */
710 case BPF_LDX | BPF_MEM | BPF_H:
711 PPC_LHZ(dst_reg, src_reg, off);
713 /* dst = *(u32 *)(ul) (src + off) */
714 case BPF_LDX | BPF_MEM | BPF_W:
715 PPC_LWZ(dst_reg, src_reg, off);
717 /* dst = *(u64 *)(ul) (src + off) */
718 case BPF_LDX | BPF_MEM | BPF_DW:
719 PPC_LD(dst_reg, src_reg, off);
724 * 16 byte instruction that uses two 'struct bpf_insn'
726 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
727 imm64 = ((u64)(u32) insn[i].imm) |
728 (((u64)(u32) insn[i+1].imm) << 32);
729 /* Adjust for two bpf instructions */
730 addrs[++i] = ctx->idx * 4;
731 PPC_LI64(dst_reg, imm64);
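/*
 * The low word comes from insn[i].imm and the high word from insn[i+1].imm;
 * e.g. imm = 0x11223344 followed by imm = 0x55667788 loads
 * 0x5566778811223344 into dst_reg.
 */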
737 case BPF_JMP | BPF_EXIT:
739 * If this isn't the very last instruction, branch to
740 * the epilogue. If we _are_ the last instruction,
741 * we'll just fall through to the epilogue.
745 /* else fall through to the epilogue */
751 case BPF_JMP | BPF_CALL:
752 ctx->seen |= SEEN_FUNC;
753 func = (u8 *) __bpf_call_base + imm;
755 /* Save skb pointer if we need to re-cache skb data */
756 if ((ctx->seen & SEEN_SKB) &&
757 bpf_helper_changes_pkt_data(func))
758 PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
760 bpf_jit_emit_func_call(image, ctx, (u64)func);
762 /* move return value from r3 to BPF_REG_0 */
763 PPC_MR(b2p[BPF_REG_0], 3);
765 /* refresh skb cache */
766 if ((ctx->seen & SEEN_SKB) &&
767 bpf_helper_changes_pkt_data(func)) {
768 /* reload skb pointer to r3 */
769 PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
770 bpf_jit_emit_skb_loads(image, ctx);
777 case BPF_JMP | BPF_JA:
778 PPC_JMP(addrs[i + 1 + off]);
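/*
 * The conditional jumps below are handled in two steps: first map the BPF
 * condition onto the PowerPC condition (true_cond), then emit the matching
 * signed/unsigned/logical compare, and finally emit a single conditional
 * branch to addrs[i + 1 + off].
 */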
781 case BPF_JMP | BPF_JGT | BPF_K:
782 case BPF_JMP | BPF_JGT | BPF_X:
783 case BPF_JMP | BPF_JSGT | BPF_K:
784 case BPF_JMP | BPF_JSGT | BPF_X:
787 case BPF_JMP | BPF_JLT | BPF_K:
788 case BPF_JMP | BPF_JLT | BPF_X:
789 case BPF_JMP | BPF_JSLT | BPF_K:
790 case BPF_JMP | BPF_JSLT | BPF_X:
793 case BPF_JMP | BPF_JGE | BPF_K:
794 case BPF_JMP | BPF_JGE | BPF_X:
795 case BPF_JMP | BPF_JSGE | BPF_K:
796 case BPF_JMP | BPF_JSGE | BPF_X:
799 case BPF_JMP | BPF_JLE | BPF_K:
800 case BPF_JMP | BPF_JLE | BPF_X:
801 case BPF_JMP | BPF_JSLE | BPF_K:
802 case BPF_JMP | BPF_JSLE | BPF_X:
805 case BPF_JMP | BPF_JEQ | BPF_K:
806 case BPF_JMP | BPF_JEQ | BPF_X:
809 case BPF_JMP | BPF_JNE | BPF_K:
810 case BPF_JMP | BPF_JNE | BPF_X:
813 case BPF_JMP | BPF_JSET | BPF_K:
814 case BPF_JMP | BPF_JSET | BPF_X:
820 case BPF_JMP | BPF_JGT | BPF_X:
821 case BPF_JMP | BPF_JLT | BPF_X:
822 case BPF_JMP | BPF_JGE | BPF_X:
823 case BPF_JMP | BPF_JLE | BPF_X:
824 case BPF_JMP | BPF_JEQ | BPF_X:
825 case BPF_JMP | BPF_JNE | BPF_X:
826 /* unsigned comparison */
827 PPC_CMPLD(dst_reg, src_reg);
829 case BPF_JMP | BPF_JSGT | BPF_X:
830 case BPF_JMP | BPF_JSLT | BPF_X:
831 case BPF_JMP | BPF_JSGE | BPF_X:
832 case BPF_JMP | BPF_JSLE | BPF_X:
833 /* signed comparison */
834 PPC_CMPD(dst_reg, src_reg);
836 case BPF_JMP | BPF_JSET | BPF_X:
837 PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
839 case BPF_JMP | BPF_JNE | BPF_K:
840 case BPF_JMP | BPF_JEQ | BPF_K:
841 case BPF_JMP | BPF_JGT | BPF_K:
842 case BPF_JMP | BPF_JLT | BPF_K:
843 case BPF_JMP | BPF_JGE | BPF_K:
844 case BPF_JMP | BPF_JLE | BPF_K:
846 * Need sign-extended load, so only positive
847 * values can be used as imm in cmpldi
849 if (imm >= 0 && imm < 32768)
850 PPC_CMPLDI(dst_reg, imm);
852 /* sign-extending load */
853 PPC_LI32(b2p[TMP_REG_1], imm);
854 /* ... but unsigned comparison */
855 PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
858 case BPF_JMP | BPF_JSGT | BPF_K:
859 case BPF_JMP | BPF_JSLT | BPF_K:
860 case BPF_JMP | BPF_JSGE | BPF_K:
861 case BPF_JMP | BPF_JSLE | BPF_K:
863 * signed comparison, so any 16-bit value
864 * can be used in cmpdi
866 if (imm >= -32768 && imm < 32768)
867 PPC_CMPDI(dst_reg, imm);
869 PPC_LI32(b2p[TMP_REG_1], imm);
870 PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
873 case BPF_JMP | BPF_JSET | BPF_K:
874 /* andi does not sign-extend the immediate */
875 if (imm >= 0 && imm < 32768)
876 /* PPC_ANDI is only available in dot form (andi., which sets CR0) */
877 PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
879 PPC_LI32(b2p[TMP_REG_1], imm);
880 PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
885 PPC_BCC(true_cond, addrs[i + 1 + off]);
889 * Loads from packet header/data
890 * Assume 32-bit input value in imm and X (src_reg)
894 case BPF_LD | BPF_W | BPF_ABS:
895 func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
896 goto common_load_abs;
897 case BPF_LD | BPF_H | BPF_ABS:
898 func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
899 goto common_load_abs;
900 case BPF_LD | BPF_B | BPF_ABS:
901 func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
905 * Load into r4, which can just be passed onto
906 * skb load helpers as the second parameter
912 case BPF_LD | BPF_W | BPF_IND:
913 func = (u8 *)sk_load_word;
914 goto common_load_ind;
915 case BPF_LD | BPF_H | BPF_IND:
916 func = (u8 *)sk_load_half;
917 goto common_load_ind;
918 case BPF_LD | BPF_B | BPF_IND:
919 func = (u8 *)sk_load_byte;
922 * Load from [src_reg + imm]
923 * Treat src_reg as a 32-bit value
925 PPC_EXTSW(4, src_reg);
927 if (imm >= -32768 && imm < 32768)
928 PPC_ADDI(4, 4, IMM_L(imm));
930 PPC_LI32(b2p[TMP_REG_1], imm);
931 PPC_ADD(4, 4, b2p[TMP_REG_1]);
936 ctx->seen |= SEEN_SKB;
937 ctx->seen |= SEEN_FUNC;
938 bpf_jit_emit_func_call(image, ctx, (u64)func);
941 * Helper returns 'lt' condition on error, and an
942 * appropriate return value in BPF_REG_0
944 PPC_BCC(COND_LT, exit_addr);
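/*
 * Setting SEEN_SKB and SEEN_FUNC here is what makes the prologue cache
 * skb->data and the linear header length in non-volatile registers and
 * save/restore LR around these helper calls.
 */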
950 case BPF_JMP | BPF_TAIL_CALL:
951 ctx->seen |= SEEN_TAILCALL;
952 bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
957 * The filter contains something cruel & unusual.
958 * We don't handle it, but also there shouldn't be
959 * anything missing from our list.
961 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
967 /* Set end-of-body-code address for exit. */
968 addrs[i] = ctx->idx * 4;
973 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
980 struct codegen_context cgctx;
983 struct bpf_binary_header *bpf_hdr;
984 struct bpf_prog *org_fp = fp;
985 struct bpf_prog *tmp_fp;
986 bool bpf_blinded = false;
988 if (!fp->jit_requested)
991 tmp_fp = bpf_jit_blind_constants(org_fp);
995 if (tmp_fp != org_fp) {
1001 addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
1002 if (addrs == NULL) {
1007 memset(&cgctx, 0, sizeof(struct codegen_context));
1009 /* Make sure that the stack is quadword aligned. */
1010 cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
1012 /* Scouting faux-generate pass 0 */
1013 if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
1014 /* We hit something illegal or unsupported. */
1020 * Pretend to build prologue, given the features we've seen. This will
1021 * update cgctx.idx as it pretends to output instructions, then we can
1022 * calculate total size from idx.
1024 bpf_jit_build_prologue(0, &cgctx);
1025 bpf_jit_build_epilogue(0, &cgctx);
1027 proglen = cgctx.idx * 4;
1028 alloclen = proglen + FUNCTION_DESCR_SIZE;
1030 bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
1031 bpf_jit_fill_ill_insns);
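/*
 * bpf_jit_binary_alloc() pre-fills the image with trap instructions via
 * bpf_jit_fill_ill_insns(), so any bytes we never overwrite fault instead
 * of being executed as stale data.
 */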
1037 code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
1039 /* Code generation passes 1-2 */
1040 for (pass = 1; pass < 3; pass++) {
1041 /* Now build the prologue, body code & epilogue for real. */
1043 bpf_jit_build_prologue(code_base, &cgctx);
1044 bpf_jit_build_body(fp, code_base, &cgctx, addrs);
1045 bpf_jit_build_epilogue(code_base, &cgctx);
1047 if (bpf_jit_enable > 1)
1048 pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
1049 proglen - (cgctx.idx * 4), cgctx.seen);
1052 if (bpf_jit_enable > 1)
1054 * Note that we dump code_base rather than image, since the opcodes
1055 * start at code_base (on ABIv1, image begins with the function descriptor).
1057 bpf_jit_dump(flen, proglen, pass, code_base);
1059 #ifdef PPC64_ELF_ABI_v1
1060 /* Function descriptor nastiness: Address + TOC */
1061 ((u64 *)image)[0] = (u64)code_base;
1062 ((u64 *)image)[1] = local_paca->kernel_toc;
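/*
 * On ELF ABIv1 the image starts with a function descriptor for the generated
 * program: callers going through fp->bpf_func find the code address and the
 * kernel TOC there, just like for any other kernel function.
 */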
1065 fp->bpf_func = (void *)image;
1067 fp->jited_len = alloclen;
1069 bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
1075 bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
1080 /* Overriding bpf_jit_free() as we don't set images read-only. */
1081 void bpf_jit_free(struct bpf_prog *fp)
1083 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1084 struct bpf_binary_header *bpf_hdr = (void *)addr;
1087 bpf_jit_binary_free(bpf_hdr);
1089 bpf_prog_unlock_free(fp);