/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

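/*
 * The JITed image is written with normal cacheable stores, so before it
 * can be executed the stores must be made visible and the icache brought
 * in sync with the dcache; smp_wmb() orders the image writes ahead of
 * the flush.
 */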
static inline void bpf_flush_icache(void *start, void *end)
{
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

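/*
 * Build the prologue.  A stack frame and register saves are emitted only
 * for the features the first pass saw this filter use: LR and the skb
 * data registers for helper calls (SEEN_DATAREF), and whichever of the
 * r_M registers back the M[] scratch slots (SEEN_MEM).
 */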
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                   struct codegen_context *ctx)
{
        int i;
        const struct sock_filter *filter = fp->insns;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                /* Make stackframe */
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

                        /* Back up non-volatile regs. */
                        PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
                         * Conditionally save regs r15-r31 as some will be used
                         * for M[] data.
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
                PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
        }

        if (ctx->seen & SEEN_DATAREF) {
                /*
                 * If this filter needs to access skb data,
                 * prepare r_D and r_HL:
                 *  r_HL = skb->len - skb->data_len
                 *  r_D  = skb->data
                 */
                PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
                PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }

        if (ctx->seen & SEEN_XREG) {
                /*
                 * TODO: Could also detect whether first instr. sets X and
                 * avoid this (as below, with A).
                 */
                PPC_LI(r_X, 0);
        }

        /* Make sure we don't leak kernel information to userspace. */
        if (bpf_needs_clear_a(&filter[0]))
                PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
                        PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                        PPC_MTLR(0);
                        PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
                        PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-vol registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
                        }
                }
        }
        /* The RETs have left a return value in R3. */

        PPC_BLR();
}

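/*
 * Pick the skb-load helper variant for a constant offset K at compile
 * time: a non-negative K takes the positive-offset path, a negative K at
 * or above SKF_LL_OFF lies in the special ancillary ranges and takes the
 * negative-offset path, and anything below SKF_LL_OFF falls back to the
 * generic helper, which checks the offset at runtime.
 */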
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
{
        const struct sock_filter *filter = fp->insns;
        int flen = fp->len;
        u8 *func;
        unsigned int true_cond;
        int i;

        /* Start of epilogue code */
        unsigned int exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                unsigned int K = filter[i].k;
                u16 code = bpf_anc_helper(&filter[i]);

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                switch (code) {
                        /*** ALU ops ***/
                case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_ADD(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
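                        /*
                         * addi sign-extends its 16-bit immediate, so for
                         * large K the high half comes from IMM_HA(K),
                         * which is IMM_H(K) bumped by one whenever
                         * IMM_L(K) is negative.  Illustrative example,
                         * K = 0x12345:
                         *      addi  r_A, r_A, 0x2345
                         *      addis r_A, r_A, 0x1
                         */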
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(K));
                        break;
                case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SUB(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(-K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(-K));
                        break;
                case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_MULW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
                        if (K < 32768)
                                PPC_MULI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_MULW(r_A, r_A, r_scratch1);
                        }
                        break;
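                        /*
                         * Classic BPF semantics: dividing (or taking a
                         * modulus) by a zero X must make the filter
                         * return 0.  Emit a runtime check that branches
                         * to an existing "return 0" (pc_ret0) if we have
                         * one, else a li/jmp pair to the epilogue; the
                         * short branch (+12) hops over those two
                         * instructions when X is non-zero.
                         */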
                case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
                case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                PPC_DIVWU(r_scratch1, r_A, r_X);
                                PPC_MULW(r_scratch1, r_X, r_scratch1);
                                PPC_SUB(r_A, r_A, r_scratch1);
                        } else {
                                PPC_DIVWU(r_A, r_A, r_X);
                        }
                        break;
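                        /*
                         * No zero check is needed for the K forms: the
                         * classic BPF checker rejects filters with a
                         * constant divisor of zero before they ever
                         * reach the JIT.
                         */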
                case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
                        PPC_LI32(r_scratch2, K);
                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
                        PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
                        if (K == 1)
                                break;
                        PPC_LI32(r_scratch1, K);
                        PPC_DIVWU(r_A, r_A, r_scratch1);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_AND(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        if (!IMM_H(K))
                                PPC_ANDI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_AND(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_OR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        if (IMM_L(K))
                                PPC_ORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_ORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
                        ctx->seen |= SEEN_XREG;
                        PPC_XOR(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
                        if (IMM_L(K))
                                PPC_XORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_XORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SLW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (K == 0)
                                break;
                        else
                                PPC_SLWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SRW(r_A, r_A, r_X);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
                        if (K == 0)
                                break;
                        else
                                PPC_SRWI(r_A, r_A, K);
                        break;
                case BPF_ALU | BPF_NEG:
                        PPC_NEG(r_A, r_A);
                        break;
                case BPF_RET | BPF_K:
                        PPC_LI32(r_ret, K);
                        if (!K) {
                                if (ctx->pc_ret0 == -1)
                                        ctx->pc_ret0 = i;
                        }
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue if we have state to clean up; if
                         * there's nothing to tidy, just return.  If we /are/
                         * the last instruction, we're about to fall through
                         * to the epilogue to return.
                         */
                        if (i != flen - 1) {
                                /*
                                 * Note: 'seen' is properly valid only on pass
                                 * #2.  Both parts of this conditional are the
                                 * same instruction size though, meaning the
                                 * first pass will still correctly determine the
                                 * code size/addresses.
                                 */
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_RET | BPF_A:
                        PPC_MR(r_ret, r_A);
                        if (i != flen - 1) {
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_MISC | BPF_TAX: /* X = A */
                        PPC_MR(r_X, r_A);
                        break;
                case BPF_MISC | BPF_TXA: /* A = X */
                        ctx->seen |= SEEN_XREG;
                        PPC_MR(r_A, r_X);
                        break;

                        /*** Constant loads/M[] access ***/
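                        /*
                         * The 16 M[] scratch slots live entirely in
                         * registers (r_M .. r_M+15), so M[] loads and
                         * stores are plain register moves; the per-slot
                         * bits OR-ed into ctx->seen tell the prologue
                         * and epilogue which non-volatiles to save and
                         * restore.
                         */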
                case BPF_LD | BPF_IMM: /* A = K */
                        PPC_LI32(r_A, K);
                        break;
                case BPF_LDX | BPF_IMM: /* X = K */
                        PPC_LI32(r_X, K);
                        break;
                case BPF_LD | BPF_MEM: /* A = mem[K] */
                        PPC_MR(r_A, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LDX | BPF_MEM: /* X = mem[K] */
                        PPC_MR(r_X, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_ST: /* mem[K] = A */
                        PPC_MR(r_M + (K & 0xf), r_A);
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_STX: /* mem[K] = X */
                        PPC_MR(r_M + (K & 0xf), r_X);
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;

                        /*** Ancillary info loads ***/
                case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                case BPF_ANC | SKF_AD_HATYPE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                ifindex) != 4);
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                type) != 2);
                        PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /* Exit, returning 0; first pass hits here. */
                                PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
                                PPC_LWZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, ifindex));
                        } else {
                                PPC_LHZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, type));
                        }

                        break;
                case BPF_ANC | SKF_AD_MARK:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          hash));
                        break;
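                        /*
                         * vlan_tci holds the tag with VLAN_TAG_PRESENT
                         * (bit 12) folded in: SKF_AD_VLAN_TAG masks the
                         * flag out of the tag, while
                         * SKF_AD_VLAN_TAG_PRESENT isolates it and shifts
                         * it down to bit 0.
                         */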
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                                PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
                        } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
                                PPC_SRWI(r_A, r_A, 12);
                        }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
                        break;
                case BPF_ANC | SKF_AD_PKTTYPE:
                        PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
                        PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
                        PPC_SRWI(r_A, r_A, 5);
                        break;
                case BPF_ANC | SKF_AD_CPU:
                        PPC_BPF_LOAD_CPU(r_A);
                        break;
                        /*** Absolute loads from packet header/data ***/
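                        /*
                         * The sk_load helpers are reached via LR
                         * (mtlr + blrl) with the offset in r_addr; on
                         * failure they set the cr0 "lt" bit and leave an
                         * appropriate return value in r3.  Calling
                         * through LR is why SEEN_DATAREF makes the
                         * prologue save it.
                         */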
                case BPF_LD | BPF_W | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
                        goto common_load;
                case BPF_LD | BPF_H | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                        goto common_load;
                case BPF_LD | BPF_B | BPF_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                common_load:
                        /* Load from [K]. */
                        ctx->seen |= SEEN_DATAREF;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in r3
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                        /*** Indirect loads from packet header/data ***/
                case BPF_LD | BPF_W | BPF_IND:
                        func = sk_load_word;
                        goto common_load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        func = sk_load_half;
                        goto common_load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        func = sk_load_byte;
                common_load_ind:
                        /*
                         * Load from [X + K].  Negative offsets are tested for
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
                        PPC_FUNC_ADDR(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
                        PPC_BLRL();
                        /* If error, cr0.LT set */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

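                        /*
                         * The classic "IP header length" idiom:
                         * X = 4 * (P[K:1] & 0xf).  The masking and
                         * multiply happen inside the sk_load_byte_msh
                         * helper.
                         */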
                case BPF_LDX | BPF_B | BPF_MSH:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                        goto common_load;

                        /*** Jumps and branches ***/
                case BPF_JMP | BPF_JA:
                        if (K != 0)
                                PPC_JMP(addrs[i + 1 + K]);
                        break;

                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */
                cond_branch:
                        /* same targets, can avoid doing the test :) */
                        if (filter[i].jt == filter[i].jf) {
                                if (filter[i].jt > 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jt]);
                                break;
                        }

                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_CMPLW(r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JSET | BPF_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_AND_DOT(r_scratch1, r_A, r_X);
                                break;
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
                                if (K < 32768)
                                        PPC_CMPLWI(r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_CMPLW(r_A, r_scratch1);
                                }
                                break;
                        case BPF_JMP | BPF_JSET | BPF_K:
                                if (K < 32768)
                                        /* PPC_ANDI is /only/ dot-form */
                                        PPC_ANDI(r_scratch1, r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_AND_DOT(r_scratch1, r_A,
                                                    r_scratch1);
                                }
                                break;
                        }
                        /* Sometimes branches are constructed "backward", with
                         * the false path being the branch and true path being
                         * a fallthrough to the next instruction.
                         */
                        if (filter[i].jt == 0)
                                /* Swap the sense of the branch */
                                PPC_BCC(true_cond ^ COND_CMP_TRUE,
                                        addrs[i + 1 + filter[i].jf]);
                        else {
                                PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
                                if (filter[i].jf != 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jf]);
                        }
                        break;
                default:
                        /* The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        if (printk_ratelimit())
                                pr_err("BPF filter opcode %04x (@%d) unsupported\n",
                                       filter[i].code, i);
                        return -ENOTSUPP;
                }

        }
        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
        unsigned int proglen;
        unsigned int alloclen;
        u32 *image = NULL;
        u32 *code_base;
        unsigned int *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen = fp->len;

        if (!bpf_jit_enable)
                return;

        addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;

        /*
         * There are multiple assembly passes as the generated code will change
         * size as it settles down, figuring out the max branch offsets/exit
         * paths required.
         *
         * The range of standard conditional branches is +/- 32Kbytes.  Since
         * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
         * finish with 8 bytes/instruction.  Not feasible, so long jumps are
         * used, distinct from short branches.
         *
         * Current:
         *
         * For now, both branch types assemble to 2 words (short branches padded
         * with a NOP); this is less efficient, but assembly will always complete
         * after exactly 3 passes:
         *
         * First pass: No code buffer; Program is "faux-generated" -- no code
         * emitted but maximum size of output determined (and addrs[] filled
         * in).  Also, we note whether we use M[], whether we use skb data, etc.
         * All generation choices assumed to be 'worst-case', e.g. branches all
         * far (2 instructions), return path code reduction not available, etc.
         *
         * Second pass: Code buffer allocated with size determined previously.
         * Prologue generated to support features we have seen used.  Exit paths
         * determined and addrs[] is filled in again, as code may be slightly
         * smaller as a result.
         *
         * Third pass: Code generated 'for real', and branch destinations
         * determined from now-accurate addrs[] map.
         *
         * Ideal:
         *
         * If we optimise this, near branches will be shorter.  On the
         * first assembly pass, we should err on the side of caution and
         * generate the biggest code.  On subsequent passes, branches will be
         * generated short or long and code size will reduce.  With smaller
         * code, more branches may fall into the short category, and code will
         * reduce more.
         *
         * Finally, if we see one pass generate code the same size as the
         * previous pass we have converged and should now generate code for
         * real.  Allocating at the end will also save the memory that would
         * otherwise be wasted by the (small) current code shrinkage.
         * Preferably, we should do a small number of passes (e.g. 5) and if we
         * haven't converged by then, get impatient and force code to generate
         * as-is, even if the odd branch would be left long.  The chances of a
         * long jump are tiny with all but the most enormous of BPF filter
         * inputs, so we should usually converge on the third pass.
         */

        cgctx.idx = 0;
        cgctx.seen = 0;
        cgctx.pc_ret0 = -1;
        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
                /* We hit something illegal or unsupported. */
                goto out;

        /*
         * Pretend to build the prologue, given the features we've seen.  This
         * will update cgctx.idx as it pretends to output instructions, then we
         * can calculate the total size from idx.
         */
        bpf_jit_build_prologue(fp, 0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
        image = module_alloc(alloclen);
        if (!image)
                goto out;

        code_base = image + (FUNCTION_DESCR_SIZE/4);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(fp, code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /* Note that we dump from code_base rather than image,
                 * since the opcodes live at code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

        bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
        /* Function descriptor nastiness: Address + TOC */
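        /*
         * (On ELFv1, a function pointer refers to an {entry, TOC}
         * descriptor rather than to the code itself; those two slots are
         * what FUNCTION_DESCR_SIZE reserved at the start of the image.)
         */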
        ((u64 *)image)[0] = (u64)code_base;
        ((u64 *)image)[1] = local_paca->kernel_toc;
#endif

        fp->bpf_func = (void *)image;
        fp->jited = 1;

out:
        kfree(addrs);
        return;
}

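/*
 * The image was allocated with module_alloc(), so it must be returned
 * via module_memfree(); the program itself is released by the generic
 * bpf_prog_unlock_free().
 */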
void bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited)
                module_memfree(fp->bpf_func);

        bpf_prog_unlock_free(fp);
}