/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

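/*
 * JIT on/off switch: 0 disables the JIT, 1 enables it, and values above 1
 * additionally dump each generation pass (see the bpf_jit_enable > 1
 * checks in bpf_jit_compile() below).
 */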
int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                   struct codegen_context *ctx)
{
        int i;
        const struct sock_filter *filter = fp->insns;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                /* Make stackframe */
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_STD(0, 1, 16);

                        /* Back up non-volatile regs. */
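                        /*
                         * Each saved GPR i lands at SP - 8*(32 - i), i.e.
                         * the usual top-down save-area layout (r31 at -8,
                         * r30 at -16, ...), matching the restores in
                         * bpf_jit_build_epilogue().
                         */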
                        PPC_STD(r_D, 1, -(8*(32-r_D)));
                        PPC_STD(r_HL, 1, -(8*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
                         * Conditionally save the 16 non-volatile regs from
                         * r_M up, as some will be used for M[] data.
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_STD(i, 1, -(8*(32-i)));
                        }
                }
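                /*
                 * stdu takes a DS-form displacement whose low two bits must
                 * be zero, hence the & 0xfffc mask on the negated frame size.
                 */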
                EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
                     (-BPF_PPC_STACKFRAME & 0xfffc));
        }

        if (ctx->seen & SEEN_DATAREF) {
                /*
                 * If this filter needs to access skb data,
                 * prepare r_D and r_HL:
                 *  r_HL = skb->len - skb->data_len
                 *  r_D  = skb->data
                 */
                PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
                PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }

        if (ctx->seen & SEEN_XREG) {
                /*
                 * TODO: Could also detect whether first instr. sets X and
                 * avoid this (as below, with A).
                 */
                PPC_LI(r_X, 0);
        }

        switch (filter[0].code) {
        case BPF_RET | BPF_K:
        case BPF_LD | BPF_W | BPF_LEN:
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                /* first instruction sets A register (or is RET 'constant') */
                break;
        default:
                /* make sure we don't leak kernel information to userspace */
                PPC_LI(r_A, 0);
        }
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
                        PPC_LD(0, 1, 16);
                        PPC_MTLR(0);
                        PPC_LD(r_D, 1, -(8*(32-r_D)));
                        PPC_LD(r_HL, 1, -(8*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-volatile registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_LD(i, 1, -(8*(32-i)));
                        }
                }
        }
        /* The RETs have left a return value in R3. */

        PPC_BLR();
}

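/*
 * Pick the skb-data load helper for constant offset K: non-negative K takes
 * the fast _positive_offset variant; K in the ancillary range [SKF_LL_OFF, 0)
 * takes _negative_offset; anything below SKF_LL_OFF can never be satisfied,
 * so the generic helper is chosen and its run-time checks should fail the
 * load.
 */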
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
{
        const struct sock_filter *filter = fp->insns;
        int flen = fp->len;
        u8 *func;
        unsigned int true_cond;
        int i;

        /* Start of epilogue code */
        unsigned int exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                unsigned int K = filter[i].k;
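                /*
                 * bpf_anc_helper() maps loads of ancillary data
                 * (K >= SKF_AD_OFF) onto BPF_ANC | SKF_AD_* pseudo opcodes,
                 * which are matched explicitly below.
                 */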
                u16 code = bpf_anc_helper(&filter[i]);

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                switch (code) {
                        /*** ALU ops ***/
                case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_ADD(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
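                        /*
                         * addi sign-extends its 16-bit immediate, so a 32-bit
                         * K is applied as a low half (addi with IMM_L) plus a
                         * carry-adjusted high half (addis with IMM_HA).  E.g.
                         * K = 0x12348765: addi adds sign-extended 0x8765
                         * (-0x789b) and addis adds IMM_HA(K) = 0x1235 in the
                         * high half, netting exactly 0x12348765.
                         */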
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(K));
                        break;
                case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SUB(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(-K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(-K));
                        break;
                case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_MUL(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
                        if (K < 32768)
                                PPC_MULI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_MUL(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
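                                /*
                                 * X != 0: branch 12 bytes ahead, over the
                                 * two-instruction return-0 sequence below.
                                 */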
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        PPC_DIVWU(r_scratch1, r_A, r_X);
                        PPC_MUL(r_scratch1, r_X, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
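                        /*
                         * No remainder instruction here, so compute
                         * A - (A / K) * K.  Unlike the X case, no zero check
                         * is emitted: a zero K is rejected before JITing.
                         */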
                        PPC_LI32(r_scratch2, K);
                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
                        PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /*
                                 * Exit, returning 0; first pass hits here
                                 * (longer worst-case code size).
                                 */
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        PPC_DIVWU(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
                        if (K == 1)
                                break;
                        PPC_LI32(r_scratch1, K);
                        PPC_DIVWU(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_AND(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        if (!IMM_H(K))
                                PPC_ANDI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_AND(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_OR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        if (IMM_L(K))
                                PPC_ORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_ORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
                        ctx->seen |= SEEN_XREG;
                        PPC_XOR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
                        if (IMM_L(K))
                                PPC_XORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_XORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SLW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (K == 0)
                                break;
                        else
                                PPC_SLWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SRW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
                        if (K == 0)
                                break;
                        else
                                PPC_SRWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_NEG:
                        PPC_NEG(r_A, r_A);
                        break;
                case BPF_RET | BPF_K:
                        PPC_LI32(r_ret, K);
                        if (!K) {
                                if (ctx->pc_ret0 == -1)
                                        ctx->pc_ret0 = i;
                        }
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue if we have stuff to clean up.
                         * Otherwise, if there's nothing to tidy, just return.
                         * If we /are/ the last instruction, we're about to
                         * fall through to the epilogue to return.
                         */
                        if (i != flen - 1) {
                                /*
                                 * Note: 'seen' is properly valid only on pass
                                 * #2.  Both parts of this conditional are the
                                 * same instruction size though, meaning the
                                 * first pass will still correctly determine the
                                 * code size/addresses.
                                 */
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_RET | BPF_A:
                        PPC_MR(r_ret, r_A);
                        if (i != flen - 1) {
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_MISC | BPF_TAX: /* X = A */
                        PPC_MR(r_X, r_A);
                        break;
                case BPF_MISC | BPF_TXA: /* A = X */
                        ctx->seen |= SEEN_XREG;
                        PPC_MR(r_A, r_X);
                        break;

                        /*** Constant loads/M[] access ***/
                case BPF_LD | BPF_IMM: /* A = K */
                        PPC_LI32(r_A, K);
                        break;
                case BPF_LDX | BPF_IMM: /* X = K */
                        PPC_LI32(r_X, K);
                        break;
                case BPF_LD | BPF_MEM: /* A = mem[K] */
                        PPC_MR(r_A, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LDX | BPF_MEM: /* X = mem[K] */
                        PPC_MR(r_X, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_ST: /* mem[K] = A */
                        PPC_MR(r_M + (K & 0xf), r_A);
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_STX: /* mem[K] = X */
                        PPC_MR(r_M + (K & 0xf), r_X);
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;

                        /*** Ancillary info loads ***/
                case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                        PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /* Exit, returning 0; first pass hits here. */
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
                        PPC_LWZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, ifindex));
                        break;
                case BPF_ANC | SKF_AD_MARK:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          hash));
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

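                        /*
                         * vlan_tci keeps the tag in the low bits with
                         * VLAN_TAG_PRESENT (0x1000) as a flag: VLAN_TAG masks
                         * the flag off, while VLAN_TAG_PRESENT isolates it
                         * and shifts it down to yield 0 or 1.
                         */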
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                                PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
                        } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
                                PPC_SRWI(r_A, r_A, 12);
                        }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
                        break;
                case BPF_ANC | SKF_AD_CPU:
#ifdef CONFIG_SMP
                        /*
                         * PACA ptr is r13:
                         * raw_smp_processor_id() = local_paca->paca_index
                         */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
                                                  paca_index) != 2);
                        PPC_LHZ_OFFS(r_A, 13,
                                     offsetof(struct paca_struct, paca_index));
#else
                        PPC_LI(r_A, 0);
#endif
                        break;

                        /*** Absolute loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
                        goto common_load;
                case BPF_LD | BPF_H | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                        goto common_load;
                case BPF_LD | BPF_B | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                common_load:
                        /* Load from [K]. */
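                        /*
                         * The helper may sit out of range for a direct bl,
                         * so call it indirectly: load its 64-bit address,
                         * move it to LR and use blrl.
                         */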
                        ctx->seen |= SEEN_DATAREF;
                        PPC_LI64(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in r3
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                        /*** Indirect loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_IND:
                        func = sk_load_word;
                        goto common_load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        func = sk_load_half;
                        goto common_load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        func = sk_load_byte;
                common_load_ind:
                        /*
                         * Load from [X + K].  Negative offsets are tested for
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
                        PPC_LI64(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
                        PPC_BLRL();
                        /* If error, cr0.LT set */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                case BPF_LDX | BPF_B | BPF_MSH:
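                        /* X = 4 * (P[K] & 0xf) -- the classic IP header length idiom. */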
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                        goto common_load;

                        /*** Jump and branches ***/
                case BPF_JMP | BPF_JA:
                        if (K != 0)
                                PPC_JMP(addrs[i + 1 + K]);
                        break;

                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */
                cond_branch:
                        /* same targets, can avoid doing the test :) */
                        if (filter[i].jt == filter[i].jf) {
                                if (filter[i].jt > 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jt]);
                                break;
                        }

                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_CMPLW(r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JSET | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_AND_DOT(r_scratch1, r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
                                if (K < 32768)
                                        PPC_CMPLWI(r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_CMPLW(r_A, r_scratch1);
                                }
                                break;
                        case BPF_JMP | BPF_JSET | BPF_K:
                                if (K < 32768)
                                        /* PPC_ANDI is /only/ dot-form */
                                        PPC_ANDI(r_scratch1, r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_AND_DOT(r_scratch1, r_A,
                                                    r_scratch1);
                                }
                                break;
                        }
                        /* Sometimes branches are constructed "backward", with
                         * the false path being the branch and true path being
                         * a fallthrough to the next instruction.
                         */
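                        /*
                         * XORing with COND_CMP_TRUE flips the condition's
                         * sense, so branch-if-true becomes branch-if-false.
                         */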
                        if (filter[i].jt == 0)
                                /* Swap the sense of the branch */
                                PPC_BCC(true_cond ^ COND_CMP_TRUE,
                                        addrs[i + 1 + filter[i].jf]);
                        else {
                                PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
                                if (filter[i].jf != 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jf]);
                        }
                        break;
                default:
                        /* The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        if (printk_ratelimit())
                                pr_err("BPF filter opcode %04x (@%d) unsupported\n",
                                       filter[i].code, i);
                        return -ENOTSUPP;
                }

        }
        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
        unsigned int proglen;
        unsigned int alloclen;
        u32 *image = NULL;
        u32 *code_base;
        unsigned int *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen = fp->len;

        if (!bpf_jit_enable)
                return;

        addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;

        /*
         * There are multiple assembly passes as the generated code will change
         * size as it settles down, figuring out the max branch offsets/exit
         * paths required.
         *
         * The range of standard conditional branches is +/- 32Kbytes.  Since
         * BPF_MAXINSNS = 4096, a worst-case branch from start to finish stays
         * in range only if every instruction assembles to at most 8 bytes,
         * which can't be guaranteed, so long jumps are used, distinct from
         * short branches.
         *
         * Current:
         *
         * For now, both branch types assemble to 2 words (short branches padded
         * with a NOP); this is less efficient, but assembly will always complete
         * after exactly 3 passes:
         *
         * First pass: No code buffer; program is "faux-generated" -- no code
         * emitted but maximum size of output determined (and addrs[] filled
         * in).  Also, we note whether we use M[], whether we use skb data, etc.
         * All generation choices are assumed to be 'worst-case', e.g. branches
         * all far (2 instructions), return path code reduction not available,
         * etc.
         *
         * Second pass: Code buffer allocated with size determined previously.
         * Prologue generated to support features we have seen used.  Exit paths
         * determined and addrs[] is filled in again, as code may be slightly
         * smaller as a result.
         *
         * Third pass: Code generated 'for real', and branch destinations
         * determined from now-accurate addrs[] map.
         *
         * Ideal:
         *
         * If we optimise this, near branches will be shorter.  On the
         * first assembly pass, we should err on the side of caution and
         * generate the biggest code.  On subsequent passes, branches will be
         * generated short or long and code size will reduce.  With smaller
         * code, more branches may fall into the short category, and code will
         * reduce more.
         *
         * Finally, if we see one pass generate code the same size as the
         * previous pass we have converged and should now generate code for
         * real.  Allocating at the end will also save the memory that would
         * otherwise be wasted by the (small) current code shrinkage.
         * Preferably, we should do a small number of passes (e.g. 5) and if we
         * haven't converged by then, get impatient and force code to generate
         * as-is, even if the odd branch would be left long.  The chances of a
         * long jump are tiny with all but the most enormous of BPF filter
         * inputs, so we should usually converge on the third pass.
         */

        cgctx.idx = 0;
        cgctx.seen = 0;
        cgctx.pc_ret0 = -1;
        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
                /* We hit something illegal or unsupported. */
                goto out;

        /*
         * Pretend to build prologue, given the features we've seen.  This will
         * update cgctx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
         */
        bpf_jit_build_prologue(fp, 0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
        image = module_alloc(alloclen);
        if (!image)
                goto out;

        code_base = image + (FUNCTION_DESCR_SIZE/4);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(fp, code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /* Note that we output the base address of code_base
                 * rather than image, since opcodes are in code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

        if (image) {
                bpf_flush_icache(code_base, code_base + (proglen/4));
                /* Function descriptor nastiness: Address + TOC */
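                /*
                 * The ppc64 ELFv1 ABI calls through a function descriptor
                 * (entry address plus TOC pointer), so fabricate one in
                 * front of the generated code so that fp->bpf_func works
                 * as an ordinary function pointer.
                 */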
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
                fp->bpf_func = (void *)image;
                fp->jited = true;
        }
out:
        kfree(addrs);
        return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited)
                module_free(NULL, fp->bpf_func);

        bpf_prog_unlock_free(fp);
}