/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

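/*
 * Order the stores of the freshly written opcodes before the icache
 * flush, so no CPU can fetch stale instructions from the new region.
 */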
static inline void bpf_flush_icache(void *start, void *end)
{
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

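/*
 * Register-save convention used below: non-volatile GPR r is spilled
 * to SP - REG_SZ * (32 - r) (r31 nearest the old SP), after which the
 * frame itself is pushed with a single store-with-update.
 */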
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                   struct codegen_context *ctx)
{
        int i;
        const struct sock_filter *filter = fp->insns;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                /* Make stackframe */
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

                        /* Back up non-volatile regs. */
                        PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
                         * Conditionally save regs r15-r31 as some will be used
                         * for M[] data.
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
                PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
        }

        if (ctx->seen & SEEN_DATAREF) {
                /*
                 * If this filter needs to access skb data,
                 * prepare r_D and r_HL:
                 *  r_HL = skb->len - skb->data_len
                 *  r_D  = skb->data
                 */
                PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
                PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }

        if (ctx->seen & SEEN_XREG) {
                /*
                 * TODO: Could also detect whether first instr. sets X and
                 * avoid this (as below, with A).
                 */
                PPC_LI(r_X, 0);
        }

        /* make sure we don't leak kernel information to user */
        if (bpf_needs_clear_a(&filter[0]))
                PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
                        PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                        PPC_MTLR(0);
                        PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-vol registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
        }
        /* The RETs have left a return value in R3. */

        PPC_BLR();
}

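/*
 * Pick the sk_load_* helper variant for the constant offset K:
 * non-negative K takes the fast positive-offset path; negative K in
 * [SKF_LL_OFF, 0) takes the variant that understands the special
 * SKF_*_OFF ranges; anything more negative falls back to the generic
 * helper, which checks the offset at run time.
 */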
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
{
        const struct sock_filter *filter = fp->insns;
        int flen = fp->len;
        u8 *func;
        unsigned int true_cond;
        int i;

        /* Start of epilogue code */
        unsigned int exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                unsigned int K = filter[i].k;
                u16 code = bpf_anc_helper(&filter[i]);

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                switch (code) {
                        /*** ALU ops ***/
                case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_ADD(r_A, r_A, r_X);
                        break;
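                /*
                 * Large immediates below are split across addi/addis.
                 * addi sign-extends its 16-bit operand, so the high half
                 * is "high-adjusted": IMM_HA(K) = (K + 0x8000) >> 16.
                 * e.g. K = 0x00018001: addi adds 0xffff8001 (-32767) and
                 * addis adds 0x2 << 16, for a net +0x18001.
                 */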
                case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(K));
                        break;
                case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SUB(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(-K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(-K));
                        break;
                case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_MULW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
                        if (K < 32768)
                                PPC_MULI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_MULW(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
                case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
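                                /*
                                 * X is non-zero: branch over the
                                 * "return 0" fallback below; the
                                 * target is 12 bytes on, past the li
                                 * and the jump.
                                 */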
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
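                        /*
                         * There is no integer remainder instruction
                         * here, so A % X is computed as A - (A / X) * X.
                         */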
                        if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                PPC_DIVWU(r_scratch1, r_A, r_X);
                                PPC_MULW(r_scratch1, r_X, r_scratch1);
                                PPC_SUB(r_A, r_A, r_scratch1);
                        } else {
                                PPC_DIVWU(r_A, r_A, r_X);
                        }
                        break;
                case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
                        PPC_LI32(r_scratch2, K);
                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
                        PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
                        if (K == 1)
                                break;
                        PPC_LI32(r_scratch1, K);
                        PPC_DIVWU(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_AND(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        if (!IMM_H(K))
                                PPC_ANDI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_AND(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_OR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        if (IMM_L(K))
                                PPC_ORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_ORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
                        ctx->seen |= SEEN_XREG;
                        PPC_XOR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
                        if (IMM_L(K))
                                PPC_XORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_XORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SLW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (K == 0)
                                break;
                        else
                                PPC_SLWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SRW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
                        if (K == 0)
                                break;
                        else
                                PPC_SRWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_NEG:
                        PPC_NEG(r_A, r_A);
                        break;
                case BPF_RET | BPF_K:
                        PPC_LI32(r_ret, K);
                        if (!K) {
                                if (ctx->pc_ret0 == -1)
                                        ctx->pc_ret0 = i;
                        }
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue if we've stuff to clean up.  Otherwise,
                         * if there's nothing to tidy, just return.  If we /are/
                         * the last instruction, we're about to fall through to
                         * the epilogue to return.
                         */
                        if (i != flen - 1) {
                                /*
                                 * Note: 'seen' is properly valid only on pass
                                 * #2.  Both parts of this conditional are the
                                 * same instruction size though, meaning the
                                 * first pass will still correctly determine the
                                 * code size/addresses.
                                 */
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_RET | BPF_A:
                        PPC_MR(r_ret, r_A);
                        if (i != flen - 1) {
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_MISC | BPF_TAX: /* X = A */
                        PPC_MR(r_X, r_A);
                        break;
                case BPF_MISC | BPF_TXA: /* A = X */
                        ctx->seen |= SEEN_XREG;
                        PPC_MR(r_A, r_X);
                        break;

                        /*** Constant loads/M[] access ***/
                case BPF_LD | BPF_IMM: /* A = K */
                        PPC_LI32(r_A, K);
                        break;
                case BPF_LDX | BPF_IMM: /* X = K */
                        PPC_LI32(r_X, K);
                        break;
                case BPF_LD | BPF_MEM: /* A = mem[K] */
                        PPC_MR(r_A, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LDX | BPF_MEM: /* X = mem[K] */
                        PPC_MR(r_X, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_ST: /* mem[K] = A */
                        PPC_MR(r_M + (K & 0xf), r_A);
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_STX: /* mem[K] = X */
                        PPC_MR(r_M + (K & 0xf), r_X);
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LD | BPF_W | BPF_LEN: /*       A = skb->len; */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
                        PPC_LWZ_OFFS(r_A, r_skb, K);
                        break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;

                        /*** Ancillary info loads ***/
                case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                case BPF_ANC | SKF_AD_HATYPE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                ifindex) != 4);
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                type) != 2);
                        PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /* Exit, returning 0; first pass hits here. */
                                PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                PPC_LWZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, ifindex));
                        } else {
                                PPC_LHZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, type));
                        }

                        break;
                case BPF_ANC | SKF_AD_MARK:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          hash));
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

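                        /*
                         * vlan_tci carries the 12-bit tag plus the
                         * VLAN_TAG_PRESENT flag in bit 12: clear the
                         * flag to get the tag, or isolate it and shift
                         * it down to 0/1 for TAG_PRESENT.
                         */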
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                                PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
                        } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
                                PPC_SRWI(r_A, r_A, 12);
                        }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
                        break;
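                /*
                 * skb->pkt_type is a bitfield; PKT_TYPE_OFFSET() and
                 * PKT_TYPE_MAX locate the byte holding it and mask the
                 * field, and the shift moves it down to bit 0.
                 */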
                case BPF_ANC | SKF_AD_PKTTYPE:
                        PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
                        PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
                        PPC_SRWI(r_A, r_A, 5);
                        break;
                case BPF_ANC | SKF_AD_CPU:
                        PPC_BPF_LOAD_CPU(r_A);
                        break;
                        /*** Absolute loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
                        goto common_load;
                case BPF_LD | BPF_H | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                        goto common_load;
                case BPF_LD | BPF_B | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                common_load:
                        /* Load from [K]. */
                        ctx->seen |= SEEN_DATAREF;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in r3
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                        /*** Indirect loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_IND:
                        func = sk_load_word;
                        goto common_load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        func = sk_load_half;
                        goto common_load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        func = sk_load_byte;
                common_load_ind:
                        /*
                         * Load from [X + K].  Negative offsets are tested for
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
                        PPC_BLRL();
                        /* If error, cr0.LT set */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                case BPF_LDX | BPF_B | BPF_MSH:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                        goto common_load;

                        /*** Jump and branches ***/
                case BPF_JMP | BPF_JA:
                        if (K != 0)
                                PPC_JMP(addrs[i + 1 + K]);
                        break;

                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */
                cond_branch:
                        /* same targets, can avoid doing the test :) */
                        if (filter[i].jt == filter[i].jf) {
                                if (filter[i].jt > 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jt]);
                                break;
                        }

                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_CMPLW(r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JSET | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_AND_DOT(r_scratch1, r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
                                if (K < 32768)
                                        PPC_CMPLWI(r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_CMPLW(r_A, r_scratch1);
                                }
                                break;
                        case BPF_JMP | BPF_JSET | BPF_K:
                                if (K < 32768)
                                        /* PPC_ANDI is /only/ dot-form */
                                        PPC_ANDI(r_scratch1, r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_AND_DOT(r_scratch1, r_A,
                                                    r_scratch1);
                                }
                                break;
                        }
                        /* Sometimes branches are constructed "backward", with
                         * the false path being the branch and true path being
                         * a fallthrough to the next instruction.
                         */
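                        /*
                         * (XOR-ing the condition with COND_CMP_TRUE
                         * inverts the sense of the encoded test.)
                         */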
                        if (filter[i].jt == 0)
                                /* Swap the sense of the branch */
                                PPC_BCC(true_cond ^ COND_CMP_TRUE,
                                        addrs[i + 1 + filter[i].jf]);
                        else {
                                PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
                                if (filter[i].jf != 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jf]);
                        }
                        break;
                default:
                        /* The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        if (printk_ratelimit())
                                pr_err("BPF filter opcode %04x (@%d) unsupported\n",
                                       filter[i].code, i);
                        return -ENOTSUPP;
                }

        }
        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
        unsigned int proglen;
        unsigned int alloclen;
        u32 *image = NULL;
        u32 *code_base;
        unsigned int *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen = fp->len;

        if (!bpf_jit_enable)
                return;

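        /* flen + 1 entries: addrs[flen] will hold the epilogue address. */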
        addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;

        /*
         * There are multiple assembly passes as the generated code will change
         * size as it settles down, figuring out the max branch offsets/exit
         * paths required.
         *
         * The range of standard conditional branches is +/- 32Kbytes.  Since
         * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
         * finish with 8 bytes/instruction.  Not feasible, so long jumps are
         * used, distinct from short branches.
         *
         * Current:
         *
         * For now, both branch types assemble to 2 words (short branches padded
         * with a NOP); this is less efficient, but assembly will always complete
         * after exactly 3 passes:
         *
         * First pass: No code buffer; Program is "faux-generated" -- no code
         * emitted but maximum size of output determined (and addrs[] filled
         * in).  Also, we note whether we use M[], whether we use skb data, etc.
         * All generation choices assumed to be 'worst-case', e.g. branches all
         * far (2 instructions), return path code reduction not available, etc.
         *
         * Second pass: Code buffer allocated with size determined previously.
         * Prologue generated to support features we have seen used.  Exit paths
         * determined and addrs[] is filled in again, as code may be slightly
         * smaller as a result.
         *
         * Third pass: Code generated 'for real', and branch destinations
         * determined from now-accurate addrs[] map.
         *
         * Ideal:
         *
         * If we optimise this, near branches will be shorter.  On the
         * first assembly pass, we should err on the side of caution and
         * generate the biggest code.  On subsequent passes, branches will be
         * generated short or long and code size will reduce.  With smaller
         * code, more branches may fall into the short category, and code will
         * reduce more.
         *
         * Finally, if we see one pass generate code the same size as the
         * previous pass we have converged and should now generate code for
         * real.  Allocating at the end will also save the memory that would
         * otherwise be wasted by the (small) current code shrinkage.
         * Preferably, we should do a small number of passes (e.g. 5) and if we
         * haven't converged by then, get impatient and force code to generate
         * as-is, even if the odd branch would be left long.  The chances of a
         * long jump are tiny with all but the most enormous of BPF filter
         * inputs, so we should usually converge on the third pass.
         */

        cgctx.idx = 0;
        cgctx.seen = 0;
        cgctx.pc_ret0 = -1;
        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
                /* We hit something illegal or unsupported. */
                goto out;

        /*
         * Pretend to build prologue, given the features we've seen.  This will
         * update cgctx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
         */
        bpf_jit_build_prologue(fp, 0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
        image = module_alloc(alloclen);
        if (!image)
                goto out;

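        /*
         * The opcodes start past the function-descriptor slot at the head
         * of the allocation (FUNCTION_DESCR_SIZE is zero on targets without
         * function descriptors); code_base counts in u32 words.
         */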
        code_base = image + (FUNCTION_DESCR_SIZE/4);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(fp, code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /* Note that we dump code_base rather than image, since the
                 * opcodes live in code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

        bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
        /* Function descriptor nastiness: Address + TOC */
        ((u64 *)image)[0] = (u64)code_base;
        ((u64 *)image)[1] = local_paca->kernel_toc;
#endif

        fp->bpf_func = (void *)image;
        fp->jited = 1;

out:
        kfree(addrs);
        return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited)
                module_memfree(fp->bpf_func);

        bpf_prog_unlock_free(fp);
}