/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <crypto/sha.h>

#include <net/sch_generic.h>

#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1    BPF_REG_1
#define BPF_REG_ARG2    BPF_REG_2
#define BPF_REG_ARG3    BPF_REG_3
#define BPF_REG_ARG4    BPF_REG_4
#define BPF_REG_ARG5    BPF_REG_5
#define BPF_REG_CTX     BPF_REG_6
#define BPF_REG_FP      BPF_REG_10

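/* Illustrative sketch (not part of the kernel sources): a helper call
 * consumes its arguments from R1-R5 and returns its result in R0, so a
 * conversion pass might emit, assuming some helper bpf_helper_func():
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX),
 *	BPF_MOV64_IMM(BPF_REG_ARG2, 42),
 *	BPF_EMIT_CALL(bpf_helper_func),
 *	... result is now in BPF_REG_0 ...
 */
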
/* Additional register mappings for converted user programs. */
#define BPF_REG_A       BPF_REG_0
#define BPF_REG_X       BPF_REG_7
#define BPF_REG_TMP     BPF_REG_2       /* scratch reg */
#define BPF_REG_D       BPF_REG_8       /* data, callee-saved */
#define BPF_REG_H       BPF_REG_9       /* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX              MAX_BPF_REG
#define MAX_BPF_EXT_REG         (MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG         MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL   0xf0

/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM   0x20

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS   0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE        't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK   512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)                             \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

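/* Illustrative example (an assumption, not from this file): the ALU
 * macros compose into ordinary register arithmetic, e.g. r0 = r0 * 2 + 1
 * as two 64-bit ALU instructions:
 *
 *	BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
 */
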
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)                                 \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST)                                       \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
                .dst_reg = DST,                                 \
                .src_reg = DST,                                 \
                .off   = 0,                                     \
                .imm   = 1 })

static inline bool insn_is_zext(const struct bpf_insn *insn)
{
        return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}

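/* Illustrative note (an assumption): BPF_ZEXT_REG(BPF_REG_1) emits a
 * 32-bit "mov r1, r1" with imm set to 1 as a marker, which
 * insn_is_zext() recognizes, so JITs can tell an explicit
 * zero-extension apart from a normal 32-bit mov:
 *
 *	struct bpf_insn zext = BPF_ZEXT_REG(BPF_REG_1);
 *
 *	if (insn_is_zext(&zext))
 *		... insn only clears the upper 32 bits of r1 ...
 */
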
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)                                  \
        BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_DW | BPF_IMM,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = (__u32) (IMM) }),                      \
        ((struct bpf_insn) {                                    \
                .code  = 0, /* zero is reserved opcode */       \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)                              \
        BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

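/* Illustrative example (an assumption): because BPF_LD_IMM64 expands to
 * two struct bpf_insn initializers, it occupies two slots in a program
 * array, e.g. when loading a map reference for a later lookup:
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),	takes two insn slots
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
 *	...
 */
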
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                      \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                                   \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)                              \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
                .dst_reg = 0,                                   \
                .src_reg = SRC,                                 \
                .off   = 0,                                     \
                .imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                       \
        ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = 0 })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)                        \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)                                          \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_JA,                      \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = OFF,                                   \
                .imm   = 0 })

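/* Illustrative example (an assumption): jump offsets are relative to
 * the instruction *after* the jump, so skipping a single instruction
 * when r0 != 0 looks like:
 *
 *	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),	skip next insn if r0 != 0
 *	BPF_MOV64_IMM(BPF_REG_0, 1),
 *	BPF_EXIT_INSN(),
 */
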
/* Relative call */

#define BPF_CALL_REL(TGT)                                       \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = BPF_PSEUDO_CALL,                     \
                .off   = 0,                                     \
                .imm   = TGT })

/* Function call */

#define BPF_CAST_CALL(x)                                        \
                ((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC)                                     \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                  \
        ((struct bpf_insn) {                                    \
                .code  = CODE,                                  \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
                .imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()                                         \
        ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_EXIT,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
                .off   = 0,                                     \
                .imm   = 0 })

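/* Putting the macros together, an illustrative minimal program (a
 * sketch, not from the kernel sources) that unconditionally returns 0:
 *
 *	static const struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 */
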
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)                                     \
        ((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)                             \
        ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)                                \
({                                                              \
        int bpf_size = -EINVAL;                                 \
                                                                \
        if (bytes == sizeof(u8))                                \
                bpf_size = BPF_B;                               \
        else if (bytes == sizeof(u16))                          \
                bpf_size = BPF_H;                               \
        else if (bytes == sizeof(u32))                          \
                bpf_size = BPF_W;                               \
        else if (bytes == sizeof(u64))                          \
                bpf_size = BPF_DW;                              \
                                                                \
        bpf_size;                                               \
})

#define bpf_size_to_bytes(bpf_size)                             \
({                                                              \
        int bytes = -EINVAL;                                    \
                                                                \
        if (bpf_size == BPF_B)                                  \
                bytes = sizeof(u8);                             \
        else if (bpf_size == BPF_H)                             \
                bytes = sizeof(u16);                            \
        else if (bpf_size == BPF_W)                             \
                bytes = sizeof(u32);                            \
        else if (bpf_size == BPF_DW)                            \
                bytes = sizeof(u64);                            \
                                                                \
        bytes;                                                  \
})

#define BPF_SIZEOF(type)                                        \
        ({                                                      \
                const int __size = bytes_to_bpf_size(sizeof(type)); \
                BUILD_BUG_ON(__size < 0);                       \
                __size;                                         \
        })

#define BPF_FIELD_SIZEOF(type, field)                           \
        ({                                                      \
                const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
                BUILD_BUG_ON(__size < 0);                       \
                __size;                                         \
        })

#define BPF_LDST_BYTES(insn)                                    \
        ({                                                      \
                const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
                WARN_ON(__size < 0);                            \
                __size;                                         \
        })

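/* Illustrative example (an assumption): these helpers pick the
 * BPF_SIZE modifier from a C type at build time. sizeof_field(struct
 * sk_buff, len) is 4, so BPF_FIELD_SIZEOF() below evaluates to BPF_W:
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
 *		    BPF_REG_0, BPF_REG_1,
 *		    offsetof(struct sk_buff, len)),
 */
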
#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)                                                       \
        (__force t)                                                            \
        (__force                                                               \
         typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
                                      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)                                                           \
        __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
                  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)                                               \
        static __always_inline                                                 \
        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
        typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));         \
        u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))          \
        {                                                                      \
                return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
        }                                                                      \
        static __always_inline                                                 \
        u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)   BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)   BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)   BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)   BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)   BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)   BPF_CALL_x(5, name, __VA_ARGS__)

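/* Illustrative example (a sketch using a hypothetical helper name):
 * the BPF_CALL_x() family declares a helper whose typed arguments are
 * marshalled through the five u64 BPF argument registers:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 *
 * This expands into a u64 bpf_example_add(u64, u64, u64, u64, u64)
 * wrapper that casts the registers back to the declared types before
 * calling the typed body.
 */
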
#define bpf_ctx_range(TYPE, MEMBER)                                             \
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                              \
        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER)                                        \
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER)                                        \
        offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                            \
        ({                                                                      \
                BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));             \
                *(PTR_SIZE) = (SIZE);                                           \
                offsetof(TYPE, MEMBER);                                         \
        })

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
        u16             len;
        compat_uptr_t   filter; /* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
        u16                     len;
        struct sock_filter      *filter;
};

/* Some arches need doubleword alignment for their instructions and/or data */
#define BPF_IMAGE_ALIGNMENT 8

struct bpf_binary_header {
        u32 pages;
        u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};

struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
                                blinded:1,      /* Was blinded */
                                is_func:1,      /* program is a bpf function */
                                kprobe_override:1, /* Do we override a kprobe? */
                                has_callchain_buf:1, /* callchain buffer allocated? */
                                enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
        enum bpf_prog_type      type;           /* Type of BPF program */
        enum bpf_attach_type    expected_attach_type; /* For some prog types */
        u32                     len;            /* Number of filter blocks */
        u32                     jited_len;      /* Size of jited insns in bytes */
        u8                      tag[BPF_TAG_SIZE];
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const void *ctx,
                                            const struct bpf_insn *insn);
        /* Instructions for interpreter */
        struct sock_filter      insns[0];
        struct bpf_insn         insnsi[];
};

struct sk_filter {
        refcount_t      refcnt;
        struct rcu_head rcu;
        struct bpf_prog *prog;
};

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

#define __BPF_PROG_RUN(prog, ctx, dfunc)        ({                      \
        u32 ret;                                                        \
        cant_migrate();                                                 \
        if (static_branch_unlikely(&bpf_stats_enabled_key)) {           \
                struct bpf_prog_stats *stats;                           \
                u64 start = sched_clock();                              \
                ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
                stats = this_cpu_ptr(prog->aux->stats);                 \
                u64_stats_update_begin(&stats->syncp);                  \
                stats->cnt++;                                           \
                stats->nsecs += sched_clock() - start;                  \
                u64_stats_update_end(&stats->syncp);                    \
        } else {                                                        \
                ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
        }                                                               \
        ret; })

#define BPF_PROG_RUN(prog, ctx)                                         \
        __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)

/*
 * Use in preemptible and therefore migratable context to make sure that
 * the execution of the BPF program runs on one CPU.
 *
 * This uses migrate_disable/enable() explicitly to document that the
 * invocation of a BPF program does not require reentrancy protection
 * against a BPF program which is invoked from a preempting task.
 *
 * For non-RT enabled kernels migrate_disable/enable() maps to
 * preempt_disable/enable(), i.e. it also disables preemption.
 */
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
                                          const void *ctx)
{
        u32 ret;

        migrate_disable();
        ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
        migrate_enable();
        return ret;
}

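/* Illustrative usage (a sketch with a hypothetical attach point, not
 * from the kernel sources): callers that already run with migration
 * disabled use BPF_PROG_RUN() directly, while preemptible callers go
 * through the pinned variant:
 *
 *	rcu_read_lock();
 *	prog = rcu_dereference(some_attach_point);
 *	if (prog)
 *		ret = bpf_prog_run_pin_on_cpu(prog, ctx);
 *	rcu_read_unlock();
 */
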
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
        struct qdisc_skb_cb qdisc_cb;
        void *data_meta;
        void *data_end;
};

struct bpf_redirect_info {
        u32 flags;
        u32 tgt_index;
        void *tgt_value;
        struct bpf_map *map;
        u32 kern_flags;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT   BIT(0)  /* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

        BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
        cb->data_meta = skb->data - skb_metadata_len(skb);
        cb->data_end  = skb->data + skb_headlen(skb);
}

/* Similar to bpf_compute_data_pointers(), except that it saves the
 * original data_end in *saved_data_end so it can be restored later.
 */
static inline void bpf_compute_and_save_data_end(
        struct sk_buff *skb, void **saved_data_end)
{
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

        *saved_data_end = cb->data_end;
        cb->data_end  = skb->data + skb_headlen(skb);
}

/* Restore data_end saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
        struct sk_buff *skb, void *saved_data_end)
{
        struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

        cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
        /* eBPF programs may read/write skb->cb[] area to transfer meta
         * data between tail calls. Since this also needs to work with
         * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
         *
         * In some socket filter cases, the cb unfortunately needs to be
         * saved/restored so that protocol specific skb->cb[] data won't
         * be lost. In any case, due to unprivileged eBPF programs
         * attached to sockets, we need to clear the bpf_skb_cb() area
         * to not leak previous contents to user space.
         */
        BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
        BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
                     sizeof_field(struct qdisc_skb_cb, data));

        return qdisc_skb_cb(skb)->data;
}

/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
                                         struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);
        u8 cb_saved[BPF_SKB_CB_LEN];
        u32 res;

        if (unlikely(prog->cb_access)) {
                memcpy(cb_saved, cb_data, sizeof(cb_saved));
                memset(cb_data, 0, sizeof(cb_saved));
        }

        res = BPF_PROG_RUN(prog, skb);

        if (unlikely(prog->cb_access))
                memcpy(cb_data, cb_saved, sizeof(cb_saved));

        return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
                                       struct sk_buff *skb)
{
        u32 res;

        migrate_disable();
        res = __bpf_prog_run_save_cb(prog, skb);
        migrate_enable();
        return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
                                        struct sk_buff *skb)
{
        u8 *cb_data = bpf_skb_cb(skb);
        u32 res;

        if (unlikely(prog->cb_access))
                memset(cb_data, 0, BPF_SKB_CB_LEN);

        res = bpf_prog_run_pin_on_cpu(prog, skb);
        return res;
}

DECLARE_BPF_DISPATCHER(xdp)

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
{
        /* Caller needs to hold rcu_read_lock() (!), otherwise program
         * can be released while still running, or map elements could be
         * freed early while still having concurrent users. XDP fastpath
         * already takes rcu_read_lock() when fetching the program, so
         * it's not necessary here anymore.
         */
        return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
}

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
        return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
        return round_up(bpf_prog_insn_size(prog) +
                        sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
        return max(sizeof(struct bpf_prog),
                   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
        /* When classic BPF programs have been loaded and the arch
         * does not have a classic BPF JIT (anymore), they have been
         * converted via bpf_migrate_filter() to eBPF and thus always
         * have an unspec program type.
         */
        return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
        const u32 size_machine = sizeof(unsigned long);

        if (size > size_machine && size % size_machine == 0)
                size = size_machine;

        return size;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
        return size <= size_default && (size & (size - 1)) == 0;
}

static inline u8
bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
        u8 access_off = off & (size_default - 1);

#ifdef __LITTLE_ENDIAN
        return access_off;
#else
        return size_default - (access_off + size);
#endif
}

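/* Worked example (illustrative): for a 1-byte narrow read at off = 2
 * within a 4-byte field (size_default = 4), access_off is 2 & 3 = 2.
 * On a little-endian machine the load shifts by 2 bytes; on big-endian
 * the same logical byte sits at 4 - (2 + 1) = 1 from the field start.
 */
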
#define bpf_ctx_wide_access_ok(off, size, type, field)                  \
        (size == sizeof(__u64) &&                                       \
        off >= offsetof(type, field) &&                                 \
        off + sizeof(__u64) <= offsetofend(type, field) &&              \
        off % sizeof(__u64) == 0)

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
        if (!fp->jited) {
                set_vm_flush_reset_perms(fp);
                set_memory_ro((unsigned long)fp, fp->pages);
        }
#endif
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
        set_vm_flush_reset_perms(hdr);
        set_memory_ro((unsigned long)hdr, hdr->pages);
        set_memory_x((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr = real_start & PAGE_MASK;

        return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
                               const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
        __bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
                                       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
                              bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
        ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
         __bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(void)
{
        /* Reconstruction of call-sites is dependent on kallsyms,
         * so make the dump subject to the same restriction.
         */
        return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

        return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

        ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

        ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
                                 unsigned int pktlen)
{
        unsigned int len;

        if (unlikely(!(fwd->flags & IFF_UP)))
                return -ENETDOWN;

        len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
        if (pktlen > len)
                return -EMSGSIZE;

        return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                            struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
void xdp_do_flush(void);

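/* Illustrative driver pattern (a sketch with hypothetical surrounding
 * code): redirect during the NAPI poll cycle and flush once before
 * leaving it, on the same CPU:
 *
 *	case XDP_REDIRECT:
 *		err = xdp_do_redirect(dev, &xdp, prog);
 *		...
 *
 *	...and at the end of the poll cycle:
 *	xdp_do_flush();
 */
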
/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
 * it is no longer only flushing maps. Keep this define for compatibility
 * until all drivers are updated - do not use xdp_do_flush_map() in new code!
 */
#define xdp_do_flush_map xdp_do_flush

void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
                                  struct bpf_prog *prog, struct sk_buff *skb,
                                  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
                     struct bpf_prog *prog, struct sk_buff *skb,
                     u32 hash)
{
        return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
                                struct bpf_jit_poke_descriptor *poke);

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
                          const struct bpf_insn *insn, bool extra_pass,
                          u64 *func_addr, bool *func_addr_fixed);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
{
        pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
               proglen, pass, image, current->comm, task_pid_nr(current));

        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
        return true;
# else
        return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
        return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
        return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
        /* These are the prerequisites, should someone ever have the
         * idea to call blinding outside of them, we make sure to
         * bail out.
         */
        if (!bpf_jit_is_ebpf())
                return false;
        if (!prog->jit_requested)
                return false;
        if (!bpf_jit_harden)
                return false;
        if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
                return false;

        return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
        /* There are a couple of corner cases where kallsyms should
         * not be enabled, e.g. when hardening is on.
         */
        if (bpf_jit_harden)
                return false;
        if (!bpf_jit_kallsyms)
                return false;
        if (bpf_jit_kallsyms == 1)
                return true;

        return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
                                 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        const char *ret = __bpf_address_lookup(addr, size, off, sym);

        if (ret && modname)
                *modname = NULL;
        return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
        return false;
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
        return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
        return false;
}

static inline int
bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
                            struct bpf_jit_poke_descriptor *poke)
{
        return -ENOTSUPP;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
        bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
        return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
                     unsigned long *off, char *sym)
{
        return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
        return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
                                  char *type, char *sym)
{
        return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
                   unsigned long *off, char **modname, char *sym)
{
        return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC         BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
        switch (first->code) {
        case BPF_RET | BPF_K:
        case BPF_LD | BPF_W | BPF_LEN:
                return false;

        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
                        return true;
                return false;

        default:
                return true;
        }
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
        BUG_ON(ftest->code & BPF_ANC);

        switch (ftest->code) {
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)     case SKF_AD_OFF + SKF_AD_##CODE:        \
                                return BPF_ANC | SKF_AD_##CODE
                switch (ftest->k) {
                BPF_ANCILLARY(PROTOCOL);
                BPF_ANCILLARY(PKTTYPE);
                BPF_ANCILLARY(IFINDEX);
                BPF_ANCILLARY(NLATTR);
                BPF_ANCILLARY(NLATTR_NEST);
                BPF_ANCILLARY(MARK);
                BPF_ANCILLARY(QUEUE);
                BPF_ANCILLARY(HATYPE);
                BPF_ANCILLARY(RXHASH);
                BPF_ANCILLARY(CPU);
                BPF_ANCILLARY(ALU_XOR_X);
                BPF_ANCILLARY(VLAN_TAG);
                BPF_ANCILLARY(VLAN_TAG_PRESENT);
                BPF_ANCILLARY(PAY_OFFSET);
                BPF_ANCILLARY(RANDOM);
                BPF_ANCILLARY(VLAN_TPID);
                }
                /* Fallthrough. */
        default:
                return ftest->code;
        }
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
                                     unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);

        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
        return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
        struct sock *sk;
        struct sockaddr *uaddr;
        /* Temporary "register" to make indirect stores to nested structures
         * defined above. We need three registers to make such a store, but
         * only two (src and dst) are available at convert_ctx_access time.
         */
        u64 tmp_reg;
        void *t_ctx;    /* Attach type specific context. */
};

struct bpf_sock_ops_kern {
        struct  sock *sk;
        u32     op;
        union {
                u32 args[4];
                u32 reply;
                u32 replylong[4];
        };
        u32     is_fullsock;
        u64     temp;                   /* temp and everything after is not
                                         * initialized to 0 before calling
                                         * the BPF program. New fields that
                                         * should be initialized to 0 should
                                         * be inserted before temp.
                                         * temp is scratch storage used by
                                         * sock_ops_convert_ctx_access
                                         * as temporary storage of a register.
                                         */
};

struct bpf_sysctl_kern {
        struct ctl_table_header *head;
        struct ctl_table *table;
        void *cur_val;
        size_t cur_len;
        void *new_val;
        size_t new_len;
        int new_updated;
        int write;
        loff_t *ppos;
        /* Temporary "register" for indirect stores to ppos. */
        u64 tmp_reg;
};

struct bpf_sockopt_kern {
        struct sock     *sk;
        u8              *optval;
        u8              *optval_end;
        s32             level;
        s32             optname;
        s32             optlen;
        s32             retval;
};

#endif /* __LINUX_FILTER_H__ */