// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
        if (in_compat_syscall()) {
                struct compat_sock_fprog f32;

                if (len != sizeof(f32))
                        return -EINVAL;
                if (copy_from_sockptr(&f32, src, sizeof(f32)))
                        return -EFAULT;
                memset(dst, 0, sizeof(*dst));
                dst->len = f32.len;
                dst->filter = compat_ptr(f32.filter);
        } else {
                if (len != sizeof(*dst))
                        return -EINVAL;
                if (copy_from_sockptr(dst, src, sizeof(*dst)))
                        return -EFAULT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
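
/* Example (illustrative): roughly how the SO_ATTACH_FILTER path in
 * sock_setsockopt() consumes the helper above; surrounding error
 * handling is elided and the variable names follow that call site:
 *
 *      struct sock_fprog fprog;
 *      int ret;
 *
 *      ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
 *      if (!ret)
 *              ret = sk_attach_filter(&fprog, sk);
 */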

/**
 *      sk_filter_trim_cap - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *      @cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
                return -ENOMEM;
        }
        err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (err)
                return err;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                struct sock *save_sk = skb->sk;
                unsigned int pkt_len;

                skb->sk = sk;
                pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
                skb->sk = save_sk;
                err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
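
/* The common entry point is the sk_filter() wrapper in <linux/filter.h>,
 * which simply caps trimming at one byte:
 *
 *      static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *      {
 *              return sk_filter_trim_cap(sk, skb, 1);
 *      }
 */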

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
        return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = (struct nlattr *) &skb->data[a];
        if (nla->nla_len > skb->len - a)
                return 0;

        nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}
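
/* The two helpers above back the SKF_AD_NLATTR and SKF_AD_NLATTR_NEST
 * classic extensions. A sketch of a classic filter using the former
 * (the attribute type and start offset are made up for illustration):
 * before the ancillary load, A holds the byte offset at which to start
 * searching and X holds the attribute type; afterwards A holds the
 * attribute's offset within the packet, or 0 if it was not found:
 *
 *      BPF_STMT(BPF_LD | BPF_IMM, 0),          // A = start offset
 *      BPF_STMT(BPF_LDX | BPF_W | BPF_IMM, 1), // X = nla type
 *      BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_NLATTR),
 */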

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u8 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return *(u8 *)(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return tmp;
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return *(u8 *)ptr;
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
                                         offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u16 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return get_unaligned_be16(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be16_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be16(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u32 tmp, *ptr;
        const int len = sizeof(tmp);

        if (likely(offset >= 0)) {
                if (headlen - offset >= len)
                        return get_unaligned_be32(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be32_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be32(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                              struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;

        switch (skb_field) {
        case SKF_AD_MARK:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, mark));
                break;

        case SKF_AD_PKTTYPE:
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
                *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
                *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
                break;

        case SKF_AD_QUEUE:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, queue_mapping));
                break;

        case SKF_AD_VLAN_TAG:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, vlan_tci));
                break;
        case SKF_AD_VLAN_TAG_PRESENT:
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
                if (PKT_VLAN_PRESENT_BIT)
                        *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
                if (PKT_VLAN_PRESENT_BIT < 7)
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
                break;
        }

        return insn - insn_buf;
}
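
/* For instance, convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX,
 * buf) emits the single instruction below and returns 1:
 *
 *      A = *(u32 *) (CTX + offsetof(struct sk_buff, mark))
 */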

static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct bpf_insn **insnp)
{
        struct bpf_insn *insn = *insnp;
        u32 cnt;

        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

                /* A = *(u16 *) (CTX + offsetof(protocol)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PKTTYPE:
                cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
                BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      BPF_REG_TMP, BPF_REG_CTX,
                                      offsetof(struct sk_buff, dev));
                /* if (tmp != 0) goto pc + 1 */
                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                *insn++ = BPF_EXIT_INSN();
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, ifindex));
                else
                        *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, type));
                break;

        case SKF_AD_OFF + SKF_AD_MARK:
                cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

                *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                                    offsetof(struct sk_buff, hash));
                break;

        case SKF_AD_OFF + SKF_AD_QUEUE:
                cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TPID:
                BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

                /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, vlan_proto));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = CTX */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(arg1=CTX, arg2=A, arg3=X) */
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
                        *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
                        *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
                        bpf_user_rnd_init_once();
                        break;
                }
                break;

        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
                /* A ^= X */
                *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;

        default:
                /* This is just a dummy call to avoid letting the compiler
                 * evict __bpf_call_base() as an optimization. Placed here
                 * where no-one bothers.
                 */
                BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
                return false;
        }

        *insnp = insn;
        return true;
}
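
/* As a concrete case of the conversion above, the classic instruction
 *
 *      BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * is rewritten into the four eBPF instructions
 *
 *      arg1 = CTX
 *      arg2 = A
 *      arg3 = X
 *      call bpf_get_raw_cpu_id
 *
 * with the helper's return value landing in R0, i.e. the classic A.
 */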

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
        const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
        int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
        bool endian = BPF_SIZE(fp->code) == BPF_H ||
                      BPF_SIZE(fp->code) == BPF_W;
        bool indirect = BPF_MODE(fp->code) == BPF_IND;
        const int ip_align = NET_IP_ALIGN;
        struct bpf_insn *insn = *insnp;
        int offset = fp->k;

        if (!indirect &&
            ((unaligned_ok && offset >= 0) ||
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
                bool ldx_off_ok = offset <= S16_MAX;

                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                if (offset)
                        *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
                *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
                                      size, 2 + endian + (!ldx_off_ok * 2));
                if (ldx_off_ok) {
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_D, offset);
                } else {
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_TMP, 0);
                }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
        }

        *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
        if (!indirect) {
                *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
        } else {
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
                if (fp->k)
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
        }

        switch (BPF_SIZE(fp->code)) {
        case BPF_B:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
                break;
        case BPF_H:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
                break;
        case BPF_W:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
                break;
        default:
                return false;
        }

        *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
        *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
        *insn   = BPF_EXIT_INSN();

        *insnp = insn;
        return true;
}
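
/* Sketch of the output for a common absolute load, e.g. the EtherType
 * fetch BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12) on a host with efficient
 * unaligned access: the fast path emits a bounds check against the
 * cached headlen in BPF_REG_H, a direct half-word load relative to the
 * cached skb->data in BPF_REG_D, and an endian fixup:
 *
 *      tmp = H; tmp -= 12
 *      if (tmp s< 2) goto slow_path
 *      A = *(u16 *) (D + 12)
 *      A = be16_to_cpu(A)
 *      goto past_slow_path
 *
 * followed by the unconditional slow path that calls
 * bpf_skb_load_helper_16() and exits with A = 0 on a negative return.
 */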

/**
 *      bpf_convert_filter - convert filter program
 *      @prog: the user passed filter program
 *      @len: the length of the user passed filter program
 *      @new_prog: allocated 'struct bpf_prog' or NULL
 *      @new_len: pointer to store length of converted program
 *      @seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
                              struct bpf_prog *new_prog, int *new_len,
                              bool *seen_ld_abs)
{
        int new_flen = 0, pass = 0, target, i, stack_off;
        struct bpf_insn *new_insn, *first_insn = NULL;
        struct sock_filter *fp;
        int *addrs = NULL;
        u8 bpf_src;

        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

        if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;

        if (new_prog) {
                first_insn = new_prog->insnsi;
                addrs = kcalloc(len, sizeof(*addrs),
                                GFP_KERNEL | __GFP_NOWARN);
                if (!addrs)
                        return -ENOMEM;
        }

do_pass:
        new_insn = first_insn;
        fp = prog;

        /* Classic BPF related prologue emission. */
        if (new_prog) {
                /* Classic BPF expects A and X to be reset first. These need
                 * to be guaranteed to be the first two instructions.
                 */
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

                /* All programs must keep CTX in callee saved BPF_REG_CTX.
                 * In the eBPF case it's done by the compiler; here we need
                 * to do it ourselves. Initial CTX is present in BPF_REG_ARG1.
                 */
                *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
                if (*seen_ld_abs) {
                        /* For packet access in classic BPF, cache skb->data
                         * in callee-saved BPF R8 and skb->len - skb->data_len
                         * (headlen) in BPF R9. Since classic BPF is read-only
                         * on CTX, we only need to cache it once.
                         */
                        *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
                                                  BPF_REG_D, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, len));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data_len));
                        *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
                }
        } else {
                new_insn += 3;
        }

        for (i = 0; i < len; fp++, i++) {
                struct bpf_insn tmp_insns[32] = { };
                struct bpf_insn *insn = tmp_insns;

                if (addrs)
                        addrs[i] = new_insn - first_insn;

                switch (fp->code) {
                /* All arithmetic insns and skb loads map as-is. */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_ABS | BPF_W:
                case BPF_LD | BPF_ABS | BPF_H:
                case BPF_LD | BPF_ABS | BPF_B:
                case BPF_LD | BPF_IND | BPF_W:
                case BPF_LD | BPF_IND | BPF_H:
                case BPF_LD | BPF_IND | BPF_B:
                        /* Check for overloaded BPF extension and
                         * directly convert it if found, otherwise
                         * just move on with mapping.
                         */
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            BPF_MODE(fp->code) == BPF_ABS &&
                            convert_bpf_extensions(fp, &insn))
                                break;
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            convert_bpf_ld_abs(fp, &insn)) {
                                *seen_ld_abs = true;
                                break;
                        }

                        if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
                            fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
                                /* Error with exception code on div/mod by 0.
                                 * For cBPF programs, this has always meant
                                 * returning 0.
                                 */
                                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
                                *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                                *insn++ = BPF_EXIT_INSN();
                        }

                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;

                /* Jump transformation cannot use BPF block macros
                 * everywhere as offset calculation and target updates
                 * require a bit more work than the rest, i.e. jump
                 * opcodes map as-is, but offsets need adjustment.
                 */

#define BPF_EMIT_JMP                                                    \
        do {                                                            \
                const s32 off_min = S16_MIN, off_max = S16_MAX;         \
                s32 off;                                                \
                                                                        \
                if (target >= len || target < 0)                        \
                        goto err;                                       \
                off = addrs ? addrs[target] - addrs[i] - 1 : 0;         \
                /* Adjust pc relative offset for 2nd or 3rd insn. */    \
                off -= insn - tmp_insns;                                \
                /* Reject anything not fitting into insn->off. */       \
                if (off < off_min || off > off_max)                     \
                        goto err;                                       \
                insn->off = off;                                        \
        } while (0)

                case BPF_JMP | BPF_JA:
                        target = i + fp->k + 1;
                        insn->code = fp->code;
                        BPF_EMIT_JMP;
                        break;

                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
                                /* BPF immediates are signed, zero extend
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
                                *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

                                insn->dst_reg = BPF_REG_A;
                                insn->src_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
                                insn->dst_reg = BPF_REG_A;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                                insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
                        }

                        /* Common case where 'jump_false' is next insn. */
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
                                BPF_EMIT_JMP;
                                break;
                        }

                        /* Convert some jumps when 'jump_true' is next insn. */
                        if (fp->jt == 0) {
                                switch (BPF_OP(fp->code)) {
                                case BPF_JEQ:
                                        insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                        break;
                                case BPF_JGT:
                                        insn->code = BPF_JMP | BPF_JLE | bpf_src;
                                        break;
                                case BPF_JGE:
                                        insn->code = BPF_JMP | BPF_JLT | bpf_src;
                                        break;
                                default:
                                        goto jmp_rest;
                                }

                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                        BPF_EMIT_JMP;
                        insn++;

                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
                        BPF_EMIT_JMP;
                        break;

                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B: {
                        struct sock_filter tmp = {
                                .code   = BPF_LD | BPF_ABS | BPF_B,
                                .k      = fp->k,
                        };

                        *seen_ld_abs = true;

                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = BPF_R0 = *(u8 *) (skb->data + K) */
                        convert_bpf_ld_abs(&tmp, &insn);
                        insn++;
                        /* A &= 0xf */
                        *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        /* A <<= 2 */
                        *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        /* tmp = X */
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = tmp */
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
                }
                /* RET_K is remapped into 2 insns. RET_A case doesn't need an
                 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
                 */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
                        if (BPF_RVAL(fp->code) == BPF_K)
                                *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
                                                        0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;

                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
                                            BPF_ST ? BPF_REG_A : BPF_REG_X,
                                            -stack_off);
                        /* check_load_and_stores() verifies that classic BPF can
                         * load from stack only after write, so tracking
                         * stack_depth for ST|STX insns is enough
                         */
                        if (new_prog && new_prog->aux->stack_depth < stack_off)
                                new_prog->aux->stack_depth = stack_off;
                        break;

                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_FP,
                                            -stack_off);
                        break;

                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                        *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
                                              BPF_REG_A : BPF_REG_X, fp->k);
                        break;

                /* X = A */
                case BPF_MISC | BPF_TAX:
                        *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;

                /* A = X */
                case BPF_MISC | BPF_TXA:
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;

                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
                                            offsetof(struct sk_buff, len));
                        break;

                /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
                        /* A = *(u32 *) (ctx + K) */
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;

                /* Unknown instruction. */
                default:
                        goto err;
                }

                insn++;
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
                new_insn += insn - tmp_insns;
        }

        if (!new_prog) {
                /* Only calculating new length. */
                *new_len = new_insn - first_insn;
                if (*seen_ld_abs)
                        *new_len += 4; /* Prologue bits. */
                return 0;
        }

        pass++;
        if (new_flen != new_insn - first_insn) {
                new_flen = new_insn - first_insn;
                if (pass > 2)
                        goto err;
                goto do_pass;
        }

        kfree(addrs);
        BUG_ON(*new_len != new_flen);
        return 0;
err:
        kfree(addrs);
        return -EINVAL;
}
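
/* Worked example: the one-instruction classic program
 *
 *      BPF_STMT(BPF_RET | BPF_K, 0)
 *
 * converts (no ld_abs caching is needed here) into
 *
 *      A ^= A
 *      X ^= X
 *      CTX = arg1
 *      A = 0
 *      exit
 *
 * i.e. the three-insn prologue above plus the two-insn RET_K mapping.
 */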

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);

        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;

        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_ST:
                case BPF_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_JMP | BPF_JA:
                        /* A jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* A jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
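
/* For example, this two-insn filter is rejected with -EINVAL because
 * M[0] is read before anything has been stored there:
 *
 *      BPF_STMT(BPF_LD | BPF_MEM, 0),
 *      BPF_STMT(BPF_RET | BPF_A, 0),
 */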

static bool chk_code_allowed(u16 code_to_probe)
{
        static const bool codes[] = {
                /* 32 bit ALU operations */
                [BPF_ALU | BPF_ADD | BPF_K] = true,
                [BPF_ALU | BPF_ADD | BPF_X] = true,
                [BPF_ALU | BPF_SUB | BPF_K] = true,
                [BPF_ALU | BPF_SUB | BPF_X] = true,
                [BPF_ALU | BPF_MUL | BPF_K] = true,
                [BPF_ALU | BPF_MUL | BPF_X] = true,
                [BPF_ALU | BPF_DIV | BPF_K] = true,
                [BPF_ALU | BPF_DIV | BPF_X] = true,
                [BPF_ALU | BPF_MOD | BPF_K] = true,
                [BPF_ALU | BPF_MOD | BPF_X] = true,
                [BPF_ALU | BPF_AND | BPF_K] = true,
                [BPF_ALU | BPF_AND | BPF_X] = true,
                [BPF_ALU | BPF_OR | BPF_K] = true,
                [BPF_ALU | BPF_OR | BPF_X] = true,
                [BPF_ALU | BPF_XOR | BPF_K] = true,
                [BPF_ALU | BPF_XOR | BPF_X] = true,
                [BPF_ALU | BPF_LSH | BPF_K] = true,
                [BPF_ALU | BPF_LSH | BPF_X] = true,
                [BPF_ALU | BPF_RSH | BPF_K] = true,
                [BPF_ALU | BPF_RSH | BPF_X] = true,
                [BPF_ALU | BPF_NEG] = true,
                /* Load instructions */
                [BPF_LD | BPF_W | BPF_ABS] = true,
                [BPF_LD | BPF_H | BPF_ABS] = true,
                [BPF_LD | BPF_B | BPF_ABS] = true,
                [BPF_LD | BPF_W | BPF_LEN] = true,
                [BPF_LD | BPF_W | BPF_IND] = true,
                [BPF_LD | BPF_H | BPF_IND] = true,
                [BPF_LD | BPF_B | BPF_IND] = true,
                [BPF_LD | BPF_IMM] = true,
                [BPF_LD | BPF_MEM] = true,
                [BPF_LDX | BPF_W | BPF_LEN] = true,
                [BPF_LDX | BPF_B | BPF_MSH] = true,
                [BPF_LDX | BPF_IMM] = true,
                [BPF_LDX | BPF_MEM] = true,
                /* Store instructions */
                [BPF_ST] = true,
                [BPF_STX] = true,
                /* Misc instructions */
                [BPF_MISC | BPF_TAX] = true,
                [BPF_MISC | BPF_TXA] = true,
                /* Return instructions */
                [BPF_RET | BPF_K] = true,
                [BPF_RET | BPF_A] = true,
                /* Jump instructions */
                [BPF_JMP | BPF_JA] = true,
                [BPF_JMP | BPF_JEQ | BPF_K] = true,
                [BPF_JMP | BPF_JEQ | BPF_X] = true,
                [BPF_JMP | BPF_JGE | BPF_K] = true,
                [BPF_JMP | BPF_JGE | BPF_X] = true,
                [BPF_JMP | BPF_JGT | BPF_K] = true,
                [BPF_JMP | BPF_JGT | BPF_X] = true,
                [BPF_JMP | BPF_JSET | BPF_K] = true,
                [BPF_JMP | BPF_JSET | BPF_X] = true,
        };

        if (code_to_probe >= ARRAY_SIZE(codes))
                return false;

        return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
                                unsigned int flen)
{
        if (filter == NULL)
                return false;
        if (flen == 0 || flen > BPF_MAXINSNS)
                return false;

        return true;
}

/**
 *      bpf_check_classic - verify socket filter code
 *      @filter: filter to verify
 *      @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
                             unsigned int flen)
{
        bool anc_found;
        int pc;

        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *ftest = &filter[pc];

                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
                        return -EINVAL;

                /* Some instructions need special checks */
                switch (ftest->code) {
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_K:
                        /* Check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (ftest->k >= 32)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                        /* Check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JA:
                        /* Note that a large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* Both conditionals must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                case BPF_LD | BPF_H | BPF_ABS:
                case BPF_LD | BPF_B | BPF_ABS:
                        anc_found = false;
                        if (bpf_anc_helper(ftest) & BPF_ANC)
                                anc_found = true;
                        /* Ancillary operation unknown or unsupported */
                        if (anc_found == false && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
        }

        /* Last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_RET | BPF_K:
        case BPF_RET | BPF_A:
                return check_load_and_stores(filter, flen);
        }

        return -EINVAL;
}
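
/* The smallest filter that passes all of the checks above is a single
 * return, e.g.
 *
 *      BPF_STMT(BPF_RET | BPF_K, 0xffff)
 *
 * which accepts every packet and allows up to 0xffff bytes of it.
 */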

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
                                      const struct sock_fprog *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct sock_fprog_kern *fkprog;

        fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
        if (!fp->orig_prog)
                return -ENOMEM;

        fkprog = fp->orig_prog;
        fkprog->len = fprog->len;

        fkprog->filter = kmemdup(fp->insns, fsize,
                                 GFP_KERNEL | __GFP_NOWARN);
        if (!fkprog->filter) {
                kfree(fp->orig_prog);
                return -ENOMEM;
        }

        return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
        struct sock_fprog_kern *fprog = fp->orig_prog;

        if (fprog) {
                kfree(fprog->filter);
                kfree(fprog);
        }
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
        } else {
                bpf_release_orig_filter(prog);
                bpf_prog_free(prog);
        }
}

static void __sk_filter_release(struct sk_filter *fp)
{
        __bpf_prog_release(fp->prog);
        kfree(fp);
}

/**
 *      sk_filter_release_rcu - Release a socket filter by rcu_head
 *      @rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        __sk_filter_release(fp);
}

/**
 *      sk_filter_release - release a socket filter
 *      @fp: filter to remove
 *
 *      Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
        if (refcount_dec_and_test(&fp->refcnt))
                call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        atomic_sub(filter_size, &sk->sk_omem_alloc);
        sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        if (!refcount_inc_not_zero(&fp->refcnt))
                return false;

        if (!__sk_filter_charge(sk, fp)) {
                sk_filter_release(fp);
                return false;
        }
        return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
        struct sock_filter *old_prog;
        struct bpf_prog *old_fp;
        int err, new_len, old_len = fp->len;
        bool seen_ld_abs = false;

        /* We are free to overwrite insns et al right here as it
         * won't be used at this point in time anymore internally
         * after the migration to the internal BPF instruction
         * representation.
         */
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
                     sizeof(struct bpf_insn));

        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
         * pass. At this time, the user BPF is stored in fp->insns.
         */
        old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
                           GFP_KERNEL | __GFP_NOWARN);
        if (!old_prog) {
                err = -ENOMEM;
                goto out_err;
        }

        /* 1st pass: calculate the new program length. */
        err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
                                 &seen_ld_abs);
        if (err)
                goto out_err_free;

        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
        fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
                 */
                fp = old_fp;
                err = -ENOMEM;
                goto out_err_free;
        }

        fp->len = new_len;

        /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
        err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
                                 &seen_ld_abs);
        if (err)
                /* 2nd bpf_convert_filter() can fail only if it fails
                 * to allocate memory, remapping must succeed. Note,
                 * that at this time old_fp has already been released
                 * by krealloc().
                 */
                goto out_err_free;

        fp = bpf_prog_select_runtime(fp, &err);
        if (err)
                goto out_err_free;

        kfree(old_prog);
        return fp;

out_err_free:
        kfree(old_prog);
out_err:
        __bpf_prog_release(fp);
        return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
                                           bpf_aux_classic_check_t trans)
{
        int err;

        fp->bpf_func = NULL;
        fp->jited = 0;

        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
                __bpf_prog_release(fp);
                return ERR_PTR(err);
        }

        /* There might be additional checks and transformations
         * needed on classic filters, f.e. in case of seccomp.
         */
        if (trans) {
                err = trans(fp->insns, fp->len);
                if (err) {
                        __bpf_prog_release(fp);
                        return ERR_PTR(err);
                }
        }

        /* Probe if we can JIT compile the filter and if so, do
         * the compilation of the filter.
         */
        bpf_jit_compile(fp);

        /* JIT compiler couldn't process this filter, so do the
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
                fp = bpf_migrate_filter(fp);

        return fp;
}

/**
 *      bpf_prog_create - create an unattached filter
 *      @pfp: the unattached filter that is created
 *      @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct bpf_prog *fp;

        /* Make sure the new filter is there and of a valid length. */
1363         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1364                 return -EINVAL;
1365
1366         fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1367         if (!fp)
1368                 return -ENOMEM;
1369
1370         memcpy(fp->insns, fprog->filter, fsize);
1371
1372         fp->len = fprog->len;
1373         /* Since unattached filters are not copied back to user
1374          * space through sk_get_filter(), we do not need to hold
1375          * a copy here and can spare ourselves the work.
1376          */
1377         fp->orig_prog = NULL;
1378
1379         /* bpf_prepare_filter() already takes care of freeing
1380          * memory in case something goes wrong.
1381          */
1382         fp = bpf_prepare_filter(fp, NULL);
1383         if (IS_ERR(fp))
1384                 return PTR_ERR(fp);
1385
1386         *pfp = fp;
1387         return 0;
1388 }
1389 EXPORT_SYMBOL_GPL(bpf_prog_create);
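/* A minimal usage sketch from the kernel side (hypothetical caller, not
 * part of this file, assuming classic BPF macros from linux/filter.h):
 * build a classic filter that accepts only ARP frames and instantiate it
 * via bpf_prog_create(). The resulting prog is later released with
 * bpf_prog_destroy().
 */
#if 0
#include <linux/filter.h>
#include <linux/if_ether.h>

static struct sock_filter arp_only_insns[] = {
	/* A = skb->protocol (classic BPF ancillary load) */
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
	/* if (A == ETH_P_ARP) accept whole packet, else drop */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
	BPF_STMT(BPF_RET | BPF_K, 0),
};

static int demo_create_arp_filter(struct bpf_prog **progp)
{
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(arp_only_insns),
		.filter	= arp_only_insns,
	};

	/* On success, *progp holds an unattached filter owned by the
	 * caller; free it later with bpf_prog_destroy().
	 */
	return bpf_prog_create(progp, &fprog);
}
#endif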
1390
1391 /**
1392  *      bpf_prog_create_from_user - create an unattached filter from user buffer
1393  *      @pfp: the unattached filter that is created
1394  *      @fprog: the filter program
1395  *      @trans: post-classic verifier transformation handler
1396  *      @save_orig: save classic BPF program
1397  *
1398  * This function effectively does the same as bpf_prog_create(), except
1399  * that it builds up its insns buffer from a user space provided buffer.
1400  * It also allows for passing a bpf_aux_classic_check_t handler.
1401  */
1402 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1403                               bpf_aux_classic_check_t trans, bool save_orig)
1404 {
1405         unsigned int fsize = bpf_classic_proglen(fprog);
1406         struct bpf_prog *fp;
1407         int err;
1408
1409         /* Make sure the new filter is present and of a valid size. */
1410         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1411                 return -EINVAL;
1412
1413         fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1414         if (!fp)
1415                 return -ENOMEM;
1416
1417         if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1418                 __bpf_prog_free(fp);
1419                 return -EFAULT;
1420         }
1421
1422         fp->len = fprog->len;
1423         fp->orig_prog = NULL;
1424
1425         if (save_orig) {
1426                 err = bpf_prog_store_orig_filter(fp, fprog);
1427                 if (err) {
1428                         __bpf_prog_free(fp);
1429                         return -ENOMEM;
1430                 }
1431         }
1432
1433         /* bpf_prepare_filter() already takes care of freeing
1434          * memory in case something goes wrong.
1435          */
1436         fp = bpf_prepare_filter(fp, trans);
1437         if (IS_ERR(fp))
1438                 return PTR_ERR(fp);
1439
1440         *pfp = fp;
1441         return 0;
1442 }
1443 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1444
1445 void bpf_prog_destroy(struct bpf_prog *fp)
1446 {
1447         __bpf_prog_release(fp);
1448 }
1449 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1450
1451 static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1452 {
1453         struct sk_filter *fp, *old_fp;
1454
1455         fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1456         if (!fp)
1457                 return -ENOMEM;
1458
1459         fp->prog = prog;
1460
1461         if (!__sk_filter_charge(sk, fp)) {
1462                 kfree(fp);
1463                 return -ENOMEM;
1464         }
1465         refcount_set(&fp->refcnt, 1);
1466
1467         old_fp = rcu_dereference_protected(sk->sk_filter,
1468                                            lockdep_sock_is_held(sk));
1469         rcu_assign_pointer(sk->sk_filter, fp);
1470
1471         if (old_fp)
1472                 sk_filter_uncharge(sk, old_fp);
1473
1474         return 0;
1475 }
1476
1477 static
1478 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1479 {
1480         unsigned int fsize = bpf_classic_proglen(fprog);
1481         struct bpf_prog *prog;
1482         int err;
1483
1484         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1485                 return ERR_PTR(-EPERM);
1486
1487         /* Make sure the new filter is present and of a valid size. */
1488         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1489                 return ERR_PTR(-EINVAL);
1490
1491         prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1492         if (!prog)
1493                 return ERR_PTR(-ENOMEM);
1494
1495         if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1496                 __bpf_prog_free(prog);
1497                 return ERR_PTR(-EFAULT);
1498         }
1499
1500         prog->len = fprog->len;
1501
1502         err = bpf_prog_store_orig_filter(prog, fprog);
1503         if (err) {
1504                 __bpf_prog_free(prog);
1505                 return ERR_PTR(-ENOMEM);
1506         }
1507
1508         /* bpf_prepare_filter() already takes care of freeing
1509          * memory in case something goes wrong.
1510          */
1511         return bpf_prepare_filter(prog, NULL);
1512 }
1513
1514 /**
1515  *      sk_attach_filter - attach a socket filter
1516  *      @fprog: the filter program
1517  *      @sk: the socket to use
1518  *
1519  * Attach the user's filter code. We first run some sanity checks on
1520  * it to make sure it does not explode on us later. If an error
1521  * occurs or there is insufficient memory for the filter, a negative
1522  * errno code is returned. On success the return is zero.
1523  */
1524 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1525 {
1526         struct bpf_prog *prog = __get_filter(fprog, sk);
1527         int err;
1528
1529         if (IS_ERR(prog))
1530                 return PTR_ERR(prog);
1531
1532         err = __sk_attach_prog(prog, sk);
1533         if (err < 0) {
1534                 __bpf_prog_release(prog);
1535                 return err;
1536         }
1537
1538         return 0;
1539 }
1540 EXPORT_SYMBOL_GPL(sk_attach_filter);
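/* From user space, this attach path is reached via setsockopt() with
 * SO_ATTACH_FILTER. A minimal sketch (hypothetical helper, assuming a
 * packet socket fd; not part of the kernel build):
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>

static int attach_accept_all(int fd)
{
	/* Single classic BPF insn: accept the whole packet. */
	static struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	/* Lands in sk_attach_filter() via sock_setsockopt(). */
	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}
#endif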
1541
1542 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1543 {
1544         struct bpf_prog *prog = __get_filter(fprog, sk);
1545         int err;
1546
1547         if (IS_ERR(prog))
1548                 return PTR_ERR(prog);
1549
1550         if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1551                 err = -ENOMEM;
1552         else
1553                 err = reuseport_attach_prog(sk, prog);
1554
1555         if (err)
1556                 __bpf_prog_release(prog);
1557
1558         return err;
1559 }
1560
1561 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1562 {
1563         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1564                 return ERR_PTR(-EPERM);
1565
1566         return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1567 }
1568
1569 int sk_attach_bpf(u32 ufd, struct sock *sk)
1570 {
1571         struct bpf_prog *prog = __get_bpf(ufd, sk);
1572         int err;
1573
1574         if (IS_ERR(prog))
1575                 return PTR_ERR(prog);
1576
1577         err = __sk_attach_prog(prog, sk);
1578         if (err < 0) {
1579                 bpf_prog_put(prog);
1580                 return err;
1581         }
1582
1583         return 0;
1584 }
1585
1586 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1587 {
1588         struct bpf_prog *prog;
1589         int err;
1590
1591         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1592                 return -EPERM;
1593
1594         prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1595         if (PTR_ERR(prog) == -EINVAL)
1596                 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
1597         if (IS_ERR(prog))
1598                 return PTR_ERR(prog);
1599
1600         if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
1601                 /* Like other non-BPF_PROG_TYPE_SOCKET_FILTER
1602                  * bpf progs (e.g. sockmap), it relies on the
1603                  * limits imposed by bpf_prog_load().
1604                  * Hence, sysctl_optmem_max is not checked.
1605                  */
1606                 if ((sk->sk_type != SOCK_STREAM &&
1607                      sk->sk_type != SOCK_DGRAM) ||
1608                     (sk->sk_protocol != IPPROTO_UDP &&
1609                      sk->sk_protocol != IPPROTO_TCP) ||
1610                     (sk->sk_family != AF_INET &&
1611                      sk->sk_family != AF_INET6)) {
1612                         err = -ENOTSUPP;
1613                         goto err_prog_put;
1614                 }
1615         } else {
1616                 /* BPF_PROG_TYPE_SOCKET_FILTER */
1617                 if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
1618                         err = -ENOMEM;
1619                         goto err_prog_put;
1620                 }
1621         }
1622
1623         err = reuseport_attach_prog(sk, prog);
1624 err_prog_put:
1625         if (err)
1626                 bpf_prog_put(prog);
1627
1628         return err;
1629 }
1630
1631 void sk_reuseport_prog_free(struct bpf_prog *prog)
1632 {
1633         if (!prog)
1634                 return;
1635
1636         if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
1637                 bpf_prog_put(prog);
1638         else
1639                 bpf_prog_destroy(prog);
1640 }
1641
1642 struct bpf_scratchpad {
1643         union {
1644                 __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1645                 u8     buff[MAX_BPF_STACK];
1646         };
1647 };
1648
1649 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1650
1651 static inline int __bpf_try_make_writable(struct sk_buff *skb,
1652                                           unsigned int write_len)
1653 {
1654         return skb_ensure_writable(skb, write_len);
1655 }
1656
1657 static inline int bpf_try_make_writable(struct sk_buff *skb,
1658                                         unsigned int write_len)
1659 {
1660         int err = __bpf_try_make_writable(skb, write_len);
1661
1662         bpf_compute_data_pointers(skb);
1663         return err;
1664 }
1665
1666 static int bpf_try_make_head_writable(struct sk_buff *skb)
1667 {
1668         return bpf_try_make_writable(skb, skb_headlen(skb));
1669 }
1670
1671 static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1672 {
1673         if (skb_at_tc_ingress(skb))
1674                 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1675 }
1676
1677 static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1678 {
1679         if (skb_at_tc_ingress(skb))
1680                 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1681 }
1682
1683 BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1684            const void *, from, u32, len, u64, flags)
1685 {
1686         void *ptr;
1687
1688         if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1689                 return -EINVAL;
1690         if (unlikely(offset > 0xffff))
1691                 return -EFAULT;
1692         if (unlikely(bpf_try_make_writable(skb, offset + len)))
1693                 return -EFAULT;
1694
1695         ptr = skb->data + offset;
1696         if (flags & BPF_F_RECOMPUTE_CSUM)
1697                 __skb_postpull_rcsum(skb, ptr, len, offset);
1698
1699         memcpy(ptr, from, len);
1700
1701         if (flags & BPF_F_RECOMPUTE_CSUM)
1702                 __skb_postpush_rcsum(skb, ptr, len, offset);
1703         if (flags & BPF_F_INVALIDATE_HASH)
1704                 skb_clear_hash(skb);
1705
1706         return 0;
1707 }
1708
1709 static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1710         .func           = bpf_skb_store_bytes,
1711         .gpl_only       = false,
1712         .ret_type       = RET_INTEGER,
1713         .arg1_type      = ARG_PTR_TO_CTX,
1714         .arg2_type      = ARG_ANYTHING,
1715         .arg3_type      = ARG_PTR_TO_MEM,
1716         .arg4_type      = ARG_CONST_SIZE,
1717         .arg5_type      = ARG_ANYTHING,
1718 };
1719
1720 BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1721            void *, to, u32, len)
1722 {
1723         void *ptr;
1724
1725         if (unlikely(offset > 0xffff))
1726                 goto err_clear;
1727
1728         ptr = skb_header_pointer(skb, offset, len, to);
1729         if (unlikely(!ptr))
1730                 goto err_clear;
1731         if (ptr != to)
1732                 memcpy(to, ptr, len);
1733
1734         return 0;
1735 err_clear:
1736         memset(to, 0, len);
1737         return -EFAULT;
1738 }
1739
1740 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1741         .func           = bpf_skb_load_bytes,
1742         .gpl_only       = false,
1743         .ret_type       = RET_INTEGER,
1744         .arg1_type      = ARG_PTR_TO_CTX,
1745         .arg2_type      = ARG_ANYTHING,
1746         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1747         .arg4_type      = ARG_CONST_SIZE,
1748 };
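/* A minimal, hypothetical sketch of how a tc BPF program might use this
 * helper (libbpf conventions assumed; not part of the kernel build):
 */
#if 0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_bytes_example(struct __sk_buff *skb)
{
	struct iphdr iph;

	/* Unlike direct packet access, this also copies out of
	 * non-linear (paged) skb data.
	 */
	if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph, sizeof(iph)) < 0)
		return TC_ACT_OK;

	return iph.protocol == IPPROTO_TCP ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";
#endif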
1749
1750 BPF_CALL_4(bpf_flow_dissector_load_bytes,
1751            const struct bpf_flow_dissector *, ctx, u32, offset,
1752            void *, to, u32, len)
1753 {
1754         void *ptr;
1755
1756         if (unlikely(offset > 0xffff))
1757                 goto err_clear;
1758
1759         if (unlikely(!ctx->skb))
1760                 goto err_clear;
1761
1762         ptr = skb_header_pointer(ctx->skb, offset, len, to);
1763         if (unlikely(!ptr))
1764                 goto err_clear;
1765         if (ptr != to)
1766                 memcpy(to, ptr, len);
1767
1768         return 0;
1769 err_clear:
1770         memset(to, 0, len);
1771         return -EFAULT;
1772 }
1773
1774 static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
1775         .func           = bpf_flow_dissector_load_bytes,
1776         .gpl_only       = false,
1777         .ret_type       = RET_INTEGER,
1778         .arg1_type      = ARG_PTR_TO_CTX,
1779         .arg2_type      = ARG_ANYTHING,
1780         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1781         .arg4_type      = ARG_CONST_SIZE,
1782 };
1783
1784 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1785            u32, offset, void *, to, u32, len, u32, start_header)
1786 {
1787         u8 *end = skb_tail_pointer(skb);
1788         u8 *start, *ptr;
1789
1790         if (unlikely(offset > 0xffff))
1791                 goto err_clear;
1792
1793         switch (start_header) {
1794         case BPF_HDR_START_MAC:
1795                 if (unlikely(!skb_mac_header_was_set(skb)))
1796                         goto err_clear;
1797                 start = skb_mac_header(skb);
1798                 break;
1799         case BPF_HDR_START_NET:
1800                 start = skb_network_header(skb);
1801                 break;
1802         default:
1803                 goto err_clear;
1804         }
1805
1806         ptr = start + offset;
1807
1808         if (likely(ptr + len <= end)) {
1809                 memcpy(to, ptr, len);
1810                 return 0;
1811         }
1812
1813 err_clear:
1814         memset(to, 0, len);
1815         return -EFAULT;
1816 }
1817
1818 static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1819         .func           = bpf_skb_load_bytes_relative,
1820         .gpl_only       = false,
1821         .ret_type       = RET_INTEGER,
1822         .arg1_type      = ARG_PTR_TO_CTX,
1823         .arg2_type      = ARG_ANYTHING,
1824         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1825         .arg4_type      = ARG_CONST_SIZE,
1826         .arg5_type      = ARG_ANYTHING,
1827 };
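/* A hypothetical BPF-side sketch (libbpf conventions assumed): reading
 * relative to the network header works whether or not a mac header has
 * been set on the skb.
 */
#if 0
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_relative_example(struct __sk_buff *skb)
{
	struct iphdr iph;

	/* Offset 0 here means "start of the network header". */
	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
					BPF_HDR_START_NET) < 0)
		return TC_ACT_OK;

	return iph.protocol == IPPROTO_UDP ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";
#endif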
1828
1829 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1830 {
1831         /* The idea is the following: should the needed direct read/write
1832          * test fail at runtime, we can pull in more data and redo the
1833          * test, since implicitly we invalidate the previous checks here.
1834          *
1835          * Or, since we know how much we need to make read/writable,
1836          * this can be done once at program start for the direct access
1837          * case. By this we overcome the limitation of only the currently
1838          * linear part of the skb being accessible.
1839          */
1840         return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1841 }
1842
1843 static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1844         .func           = bpf_skb_pull_data,
1845         .gpl_only       = false,
1846         .ret_type       = RET_INTEGER,
1847         .arg1_type      = ARG_PTR_TO_CTX,
1848         .arg2_type      = ARG_ANYTHING,
1849 };
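/* A hypothetical BPF-side sketch of the pattern described in the comment
 * above (libbpf conventions assumed; the 64-byte window is illustrative):
 * pull data in once, re-derive the packet pointers, then use direct access.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pull_data_example(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	if (data + 64 > data_end) {
		/* Potentially non-linear; pull the bytes into the linear
		 * area and recompute the pointers, since the helper
		 * invalidates all previously derived packet pointers.
		 */
		if (bpf_skb_pull_data(skb, 64) < 0)
			return TC_ACT_OK;
		data = (void *)(long)skb->data;
		data_end = (void *)(long)skb->data_end;
		if (data + 64 > data_end)
			return TC_ACT_OK;
	}
	/* ... up to 64 bytes may now be read/written directly ... */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
#endif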
1850
1851 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
1852 {
1853         return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
1854 }
1855
1856 static const struct bpf_func_proto bpf_sk_fullsock_proto = {
1857         .func           = bpf_sk_fullsock,
1858         .gpl_only       = false,
1859         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
1860         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
1861 };
1862
1863 static inline int sk_skb_try_make_writable(struct sk_buff *skb,
1864                                            unsigned int write_len)
1865 {
1866         int err = __bpf_try_make_writable(skb, write_len);
1867
1868         bpf_compute_data_end_sk_skb(skb);
1869         return err;
1870 }
1871
1872 BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
1873 {
1874         /* Idea is the following: should the needed direct read/write
1875          * test fail during runtime, we can pull in more data and redo
1876          * again, since implicitly, we invalidate previous checks here.
1877          *
1878          * Or, since we know how much we need to make read/writeable,
1879          * this can be done once at the program beginning for direct
1880          * access case. By this we overcome limitations of only current
1881          * headroom being accessible.
1882          */
1883         return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
1884 }
1885
1886 static const struct bpf_func_proto sk_skb_pull_data_proto = {
1887         .func           = sk_skb_pull_data,
1888         .gpl_only       = false,
1889         .ret_type       = RET_INTEGER,
1890         .arg1_type      = ARG_PTR_TO_CTX,
1891         .arg2_type      = ARG_ANYTHING,
1892 };
1893
1894 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1895            u64, from, u64, to, u64, flags)
1896 {
1897         __sum16 *ptr;
1898
1899         if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1900                 return -EINVAL;
1901         if (unlikely(offset > 0xffff || offset & 1))
1902                 return -EFAULT;
1903         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1904                 return -EFAULT;
1905
1906         ptr = (__sum16 *)(skb->data + offset);
1907         switch (flags & BPF_F_HDR_FIELD_MASK) {
1908         case 0:
1909                 if (unlikely(from != 0))
1910                         return -EINVAL;
1911
1912                 csum_replace_by_diff(ptr, to);
1913                 break;
1914         case 2:
1915                 csum_replace2(ptr, from, to);
1916                 break;
1917         case 4:
1918                 csum_replace4(ptr, from, to);
1919                 break;
1920         default:
1921                 return -EINVAL;
1922         }
1923
1924         return 0;
1925 }
1926
1927 static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1928         .func           = bpf_l3_csum_replace,
1929         .gpl_only       = false,
1930         .ret_type       = RET_INTEGER,
1931         .arg1_type      = ARG_PTR_TO_CTX,
1932         .arg2_type      = ARG_ANYTHING,
1933         .arg3_type      = ARG_ANYTHING,
1934         .arg4_type      = ARG_ANYTHING,
1935         .arg5_type      = ARG_ANYTHING,
1936 };
1937
1938 BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1939            u64, from, u64, to, u64, flags)
1940 {
1941         bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1942         bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1943         bool do_mforce = flags & BPF_F_MARK_ENFORCE;
1944         __sum16 *ptr;
1945
1946         if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1947                                BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
1948                 return -EINVAL;
1949         if (unlikely(offset > 0xffff || offset & 1))
1950                 return -EFAULT;
1951         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1952                 return -EFAULT;
1953
1954         ptr = (__sum16 *)(skb->data + offset);
1955         if (is_mmzero && !do_mforce && !*ptr)
1956                 return 0;
1957
1958         switch (flags & BPF_F_HDR_FIELD_MASK) {
1959         case 0:
1960                 if (unlikely(from != 0))
1961                         return -EINVAL;
1962
1963                 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1964                 break;
1965         case 2:
1966                 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1967                 break;
1968         case 4:
1969                 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1970                 break;
1971         default:
1972                 return -EINVAL;
1973         }
1974
1975         if (is_mmzero && !*ptr)
1976                 *ptr = CSUM_MANGLED_0;
1977         return 0;
1978 }
1979
1980 static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1981         .func           = bpf_l4_csum_replace,
1982         .gpl_only       = false,
1983         .ret_type       = RET_INTEGER,
1984         .arg1_type      = ARG_PTR_TO_CTX,
1985         .arg2_type      = ARG_ANYTHING,
1986         .arg3_type      = ARG_ANYTHING,
1987         .arg4_type      = ARG_ANYTHING,
1988         .arg5_type      = ARG_ANYTHING,
1989 };
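/* A hypothetical BPF-side sketch combining the two csum_replace helpers
 * (libbpf conventions assumed; offsets assume a plain Ethernet/IPv4/UDP
 * frame and the new address is illustrative): rewrite the IPv4 daddr and
 * patch both the IP and the pseudo-header covered UDP checksums.
 */
#if 0
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int dnat_example(struct __sk_buff *skb)
{
	const __u32 ip_dst_off   = ETH_HLEN + offsetof(struct iphdr, daddr);
	const __u32 ip_csum_off  = ETH_HLEN + offsetof(struct iphdr, check);
	const __u32 udp_csum_off = ETH_HLEN + sizeof(struct iphdr) +
				   offsetof(struct udphdr, check);
	__be32 old_ip, new_ip = bpf_htonl(0xc0a80101);	/* 192.168.1.1 */

	if (bpf_skb_load_bytes(skb, ip_dst_off, &old_ip, sizeof(old_ip)) < 0)
		return TC_ACT_OK;

	/* Lower 4 flag bits encode the field width (4 bytes here);
	 * BPF_F_PSEUDO_HDR marks the L4 csum as covering a pseudo
	 * header, BPF_F_MARK_MANGLED_0 keeps UDP's "no checksum"
	 * encoding intact.
	 */
	bpf_l4_csum_replace(skb, udp_csum_off, old_ip, new_ip,
			    BPF_F_PSEUDO_HDR | BPF_F_MARK_MANGLED_0 | 4);
	bpf_l3_csum_replace(skb, ip_csum_off, old_ip, new_ip, 4);
	bpf_skb_store_bytes(skb, ip_dst_off, &new_ip, sizeof(new_ip), 0);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
#endif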
1990
1991 BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1992            __be32 *, to, u32, to_size, __wsum, seed)
1993 {
1994         struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1995         u32 diff_size = from_size + to_size;
1996         int i, j = 0;
1997
1998         /* This is quite flexible, some examples:
1999          *
2000          * from_size == 0, to_size > 0,  seed := csum --> pushing data
2001          * from_size > 0,  to_size == 0, seed := csum --> pulling data
2002          * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
2003          *
2004          * Even for diffing, from_size and to_size don't need to be equal.
2005          */
2006         if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
2007                      diff_size > sizeof(sp->diff)))
2008                 return -EINVAL;
2009
2010         for (i = 0; i < from_size / sizeof(__be32); i++, j++)
2011                 sp->diff[j] = ~from[i];
2012         for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
2013                 sp->diff[j] = to[i];
2014
2015         return csum_partial(sp->diff, diff_size, seed);
2016 }
2017
2018 static const struct bpf_func_proto bpf_csum_diff_proto = {
2019         .func           = bpf_csum_diff,
2020         .gpl_only       = false,
2021         .pkt_access     = true,
2022         .ret_type       = RET_INTEGER,
2023         .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
2024         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
2025         .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
2026         .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
2027         .arg5_type      = ARG_ANYTHING,
2028 };
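/* A hypothetical BPF-side sketch of the "diffing data" mode described in
 * the comment above (libbpf conventions assumed; csum_patch is an
 * illustrative helper name): compute a delta over a replaced 16-byte
 * block, e.g. an IPv6 address, and apply it to an L4 checksum. With a
 * zero 'from' and the diff as 'to', field width 0 selects the
 * replace-by-difference mode of bpf_l4_csum_replace().
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static __always_inline int csum_patch(struct __sk_buff *skb, __u32 csum_off,
				      __be32 old_addr[4], __be32 new_addr[4])
{
	__s64 diff = bpf_csum_diff(old_addr, 16, new_addr, 16, 0);

	if (diff < 0)
		return diff;
	return bpf_l4_csum_replace(skb, csum_off, 0, diff, BPF_F_PSEUDO_HDR);
}
#endif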
2029
2030 BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
2031 {
2032         /* The interface is to be used in combination with bpf_csum_diff()
2033          * for direct packet writes. Checksum rotation for alignment as well
2034          * as emulating csum_sub() can be done from the eBPF program.
2035          */
2036         if (skb->ip_summed == CHECKSUM_COMPLETE)
2037                 return (skb->csum = csum_add(skb->csum, csum));
2038
2039         return -ENOTSUPP;
2040 }
2041
2042 static const struct bpf_func_proto bpf_csum_update_proto = {
2043         .func           = bpf_csum_update,
2044         .gpl_only       = false,
2045         .ret_type       = RET_INTEGER,
2046         .arg1_type      = ARG_PTR_TO_CTX,
2047         .arg2_type      = ARG_ANYTHING,
2048 };
2049
2050 BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
2051 {
2052         /* The interface is to be used in combination with bpf_skb_adjust_room()
2053          * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET
2054          * is passed as flags, for example.
2055          */
2056         switch (level) {
2057         case BPF_CSUM_LEVEL_INC:
2058                 __skb_incr_checksum_unnecessary(skb);
2059                 break;
2060         case BPF_CSUM_LEVEL_DEC:
2061                 __skb_decr_checksum_unnecessary(skb);
2062                 break;
2063         case BPF_CSUM_LEVEL_RESET:
2064                 __skb_reset_checksum_unnecessary(skb);
2065                 break;
2066         case BPF_CSUM_LEVEL_QUERY:
2067                 return skb->ip_summed == CHECKSUM_UNNECESSARY ?
2068                        skb->csum_level : -EACCES;
2069         default:
2070                 return -EINVAL;
2071         }
2072
2073         return 0;
2074 }
2075
2076 static const struct bpf_func_proto bpf_csum_level_proto = {
2077         .func           = bpf_csum_level,
2078         .gpl_only       = false,
2079         .ret_type       = RET_INTEGER,
2080         .arg1_type      = ARG_PTR_TO_CTX,
2081         .arg2_type      = ARG_ANYTHING,
2082 };
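/* A hypothetical BPF-side sketch of the decap case mentioned in the
 * comment above (libbpf conventions assumed; stripping a bare outer IPv4
 * header is illustrative): adjust room without resetting ip_summed, then
 * decrement the csum level to account for the removed layer.
 */
#if 0
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int decap_example(struct __sk_buff *skb)
{
	if (bpf_skb_adjust_room(skb, -(int)sizeof(struct iphdr),
				BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_NO_CSUM_RESET) < 0)
		return TC_ACT_SHOT;
	bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
#endif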
2083
2084 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
2085 {
2086         return dev_forward_skb(dev, skb);
2087 }
2088
2089 static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
2090                                       struct sk_buff *skb)
2091 {
2092         int ret = ____dev_forward_skb(dev, skb);
2093
2094         if (likely(!ret)) {
2095                 skb->dev = dev;
2096                 ret = netif_rx(skb);
2097         }
2098
2099         return ret;
2100 }
2101
2102 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2103 {
2104         int ret;
2105
2106         if (dev_xmit_recursion()) {
2107                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2108                 kfree_skb(skb);
2109                 return -ENETDOWN;
2110         }
2111
2112         skb->dev = dev;
2113         skb->tstamp = 0;
2114
2115         dev_xmit_recursion_inc();
2116         ret = dev_queue_xmit(skb);
2117         dev_xmit_recursion_dec();
2118
2119         return ret;
2120 }
2121
2122 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2123                                  u32 flags)
2124 {
2125         unsigned int mlen = skb_network_offset(skb);
2126
2127         if (mlen) {
2128                 __skb_pull(skb, mlen);
2129
2130                 /* At ingress, the mac header has already been pulled once.
2131                  * At egress, skb_postpull_rcsum() has to be done in case
2132                  * the skb originated from ingress (i.e. a forwarded skb)
2133                  * to ensure that rcsum starts at net header.
2134                  */
2135                 if (!skb_at_tc_ingress(skb))
2136                         skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2137         }
2138         skb_pop_mac_header(skb);
2139         skb_reset_mac_len(skb);
2140         return flags & BPF_F_INGRESS ?
2141                __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
2142 }
2143
2144 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
2145                                  u32 flags)
2146 {
2147         /* Verify that a link layer header is carried */
2148         if (unlikely(skb->mac_header >= skb->network_header)) {
2149                 kfree_skb(skb);
2150                 return -ERANGE;
2151         }
2152
2153         bpf_push_mac_rcsum(skb);
2154         return flags & BPF_F_INGRESS ?
2155                __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
2156 }
2157
2158 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
2159                           u32 flags)
2160 {
2161         if (dev_is_mac_header_xmit(dev))
2162                 return __bpf_redirect_common(skb, dev, flags);
2163         else
2164                 return __bpf_redirect_no_mac(skb, dev, flags);
2165 }
2166
2167 #if IS_ENABLED(CONFIG_IPV6)
2168 static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
2169                             struct net_device *dev, struct bpf_nh_params *nh)
2170 {
2171         u32 hh_len = LL_RESERVED_SPACE(dev);
2172         const struct in6_addr *nexthop;
2173         struct dst_entry *dst = NULL;
2174         struct neighbour *neigh;
2175
2176         if (dev_xmit_recursion()) {
2177                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2178                 goto out_drop;
2179         }
2180
2181         skb->dev = dev;
2182         skb->tstamp = 0;
2183
2184         if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
2185                 struct sk_buff *skb2;
2186
2187                 skb2 = skb_realloc_headroom(skb, hh_len);
2188                 if (unlikely(!skb2)) {
2189                         kfree_skb(skb);
2190                         return -ENOMEM;
2191                 }
2192                 if (skb->sk)
2193                         skb_set_owner_w(skb2, skb->sk);
2194                 consume_skb(skb);
2195                 skb = skb2;
2196         }
2197
2198         rcu_read_lock_bh();
2199         if (!nh) {
2200                 dst = skb_dst(skb);
2201                 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
2202                                       &ipv6_hdr(skb)->daddr);
2203         } else {
2204                 nexthop = &nh->ipv6_nh;
2205         }
2206         neigh = ip_neigh_gw6(dev, nexthop);
2207         if (likely(!IS_ERR(neigh))) {
2208                 int ret;
2209
2210                 sock_confirm_neigh(skb, neigh);
2211                 dev_xmit_recursion_inc();
2212                 ret = neigh_output(neigh, skb, false);
2213                 dev_xmit_recursion_dec();
2214                 rcu_read_unlock_bh();
2215                 return ret;
2216         }
2217         rcu_read_unlock_bh();
2218         if (dst)
2219                 IP6_INC_STATS(dev_net(dst->dev),
2220                               ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
2221 out_drop:
2222         kfree_skb(skb);
2223         return -ENETDOWN;
2224 }
2225
2226 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
2227                                    struct bpf_nh_params *nh)
2228 {
2229         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
2230         struct net *net = dev_net(dev);
2231         int err, ret = NET_XMIT_DROP;
2232
2233         if (!nh) {
2234                 struct dst_entry *dst;
2235                 struct flowi6 fl6 = {
2236                         .flowi6_flags = FLOWI_FLAG_ANYSRC,
2237                         .flowi6_mark  = skb->mark,
2238                         .flowlabel    = ip6_flowinfo(ip6h),
2239                         .flowi6_oif   = dev->ifindex,
2240                         .flowi6_proto = ip6h->nexthdr,
2241                         .daddr        = ip6h->daddr,
2242                         .saddr        = ip6h->saddr,
2243                 };
2244
2245                 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
2246                 if (IS_ERR(dst))
2247                         goto out_drop;
2248
2249                 skb_dst_set(skb, dst);
2250         } else if (nh->nh_family != AF_INET6) {
2251                 goto out_drop;
2252         }
2253
2254         err = bpf_out_neigh_v6(net, skb, dev, nh);
2255         if (unlikely(net_xmit_eval(err)))
2256                 dev->stats.tx_errors++;
2257         else
2258                 ret = NET_XMIT_SUCCESS;
2259         goto out_xmit;
2260 out_drop:
2261         dev->stats.tx_errors++;
2262         kfree_skb(skb);
2263 out_xmit:
2264         return ret;
2265 }
2266 #else
2267 static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
2268                                    struct bpf_nh_params *nh)
2269 {
2270         kfree_skb(skb);
2271         return NET_XMIT_DROP;
2272 }
2273 #endif /* CONFIG_IPV6 */
2274
2275 #if IS_ENABLED(CONFIG_INET)
2276 static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
2277                             struct net_device *dev, struct bpf_nh_params *nh)
2278 {
2279         u32 hh_len = LL_RESERVED_SPACE(dev);
2280         struct neighbour *neigh;
2281         bool is_v6gw = false;
2282
2283         if (dev_xmit_recursion()) {
2284                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2285                 goto out_drop;
2286         }
2287
2288         skb->dev = dev;
2289         skb->tstamp = 0;
2290
2291         if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
2292                 struct sk_buff *skb2;
2293
2294                 skb2 = skb_realloc_headroom(skb, hh_len);
2295                 if (unlikely(!skb2)) {
2296                         kfree_skb(skb);
2297                         return -ENOMEM;
2298                 }
2299                 if (skb->sk)
2300                         skb_set_owner_w(skb2, skb->sk);
2301                 consume_skb(skb);
2302                 skb = skb2;
2303         }
2304
2305         rcu_read_lock_bh();
2306         if (!nh) {
2307                 struct dst_entry *dst = skb_dst(skb);
2308                 struct rtable *rt = container_of(dst, struct rtable, dst);
2309
2310                 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2311         } else if (nh->nh_family == AF_INET6) {
2312                 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
2313                 is_v6gw = true;
2314         } else if (nh->nh_family == AF_INET) {
2315                 neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
2316         } else {
2317                 rcu_read_unlock_bh();
2318                 goto out_drop;
2319         }
2320
2321         if (likely(!IS_ERR(neigh))) {
2322                 int ret;
2323
2324                 sock_confirm_neigh(skb, neigh);
2325                 dev_xmit_recursion_inc();
2326                 ret = neigh_output(neigh, skb, is_v6gw);
2327                 dev_xmit_recursion_dec();
2328                 rcu_read_unlock_bh();
2329                 return ret;
2330         }
2331         rcu_read_unlock_bh();
2332 out_drop:
2333         kfree_skb(skb);
2334         return -ENETDOWN;
2335 }
2336
2337 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
2338                                    struct bpf_nh_params *nh)
2339 {
2340         const struct iphdr *ip4h = ip_hdr(skb);
2341         struct net *net = dev_net(dev);
2342         int err, ret = NET_XMIT_DROP;
2343
2344         if (!nh) {
2345                 struct flowi4 fl4 = {
2346                         .flowi4_flags = FLOWI_FLAG_ANYSRC,
2347                         .flowi4_mark  = skb->mark,
2348                         .flowi4_tos   = RT_TOS(ip4h->tos),
2349                         .flowi4_oif   = dev->ifindex,
2350                         .flowi4_proto = ip4h->protocol,
2351                         .daddr        = ip4h->daddr,
2352                         .saddr        = ip4h->saddr,
2353                 };
2354                 struct rtable *rt;
2355
2356                 rt = ip_route_output_flow(net, &fl4, NULL);
2357                 if (IS_ERR(rt))
2358                         goto out_drop;
2359                 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
2360                         ip_rt_put(rt);
2361                         goto out_drop;
2362                 }
2363
2364                 skb_dst_set(skb, &rt->dst);
2365         }
2366
2367         err = bpf_out_neigh_v4(net, skb, dev, nh);
2368         if (unlikely(net_xmit_eval(err)))
2369                 dev->stats.tx_errors++;
2370         else
2371                 ret = NET_XMIT_SUCCESS;
2372         goto out_xmit;
2373 out_drop:
2374         dev->stats.tx_errors++;
2375         kfree_skb(skb);
2376 out_xmit:
2377         return ret;
2378 }
2379 #else
2380 static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
2381                                    struct bpf_nh_params *nh)
2382 {
2383         kfree_skb(skb);
2384         return NET_XMIT_DROP;
2385 }
2386 #endif /* CONFIG_INET */
2387
2388 static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
2389                                 struct bpf_nh_params *nh)
2390 {
2391         struct ethhdr *ethh = eth_hdr(skb);
2392
2393         if (unlikely(skb->mac_header >= skb->network_header))
2394                 goto out;
2395         bpf_push_mac_rcsum(skb);
2396         if (is_multicast_ether_addr(ethh->h_dest))
2397                 goto out;
2398
2399         skb_pull(skb, sizeof(*ethh));
2400         skb_unset_mac_header(skb);
2401         skb_reset_network_header(skb);
2402
2403         if (skb->protocol == htons(ETH_P_IP))
2404                 return __bpf_redirect_neigh_v4(skb, dev, nh);
2405         else if (skb->protocol == htons(ETH_P_IPV6))
2406                 return __bpf_redirect_neigh_v6(skb, dev, nh);
2407 out:
2408         kfree_skb(skb);
2409         return -ENOTSUPP;
2410 }
2411
2412 /* Internal, non-exposed redirect flags. */
2413 enum {
2414         BPF_F_NEIGH     = (1ULL << 1),
2415         BPF_F_PEER      = (1ULL << 2),
2416         BPF_F_NEXTHOP   = (1ULL << 3),
2417 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
2418 };
2419
2420 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
2421 {
2422         struct net_device *dev;
2423         struct sk_buff *clone;
2424         int ret;
2425
2426         if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
2427                 return -EINVAL;
2428
2429         dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2430         if (unlikely(!dev))
2431                 return -EINVAL;
2432
2433         clone = skb_clone(skb, GFP_ATOMIC);
2434         if (unlikely(!clone))
2435                 return -ENOMEM;
2436
2437         /* For direct write, we need to keep the invariant that the skbs
2438          * we're dealing with are uncloned. Should uncloning fail here,
2439          * we need to free the just generated clone so that we can unclone
2440          * once again.
2441          */
2442         ret = bpf_try_make_head_writable(skb);
2443         if (unlikely(ret)) {
2444                 kfree_skb(clone);
2445                 return -ENOMEM;
2446         }
2447
2448         return __bpf_redirect(clone, dev, flags);
2449 }
2450
2451 static const struct bpf_func_proto bpf_clone_redirect_proto = {
2452         .func           = bpf_clone_redirect,
2453         .gpl_only       = false,
2454         .ret_type       = RET_INTEGER,
2455         .arg1_type      = ARG_PTR_TO_CTX,
2456         .arg2_type      = ARG_ANYTHING,
2457         .arg3_type      = ARG_ANYTHING,
2458 };
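/* A hypothetical BPF-side sketch (libbpf conventions assumed; the mirror
 * ifindex is illustrative and would normally come from user space, e.g.
 * via a map or .rodata): mirror traffic to a capture device while the
 * original skb continues through the stack.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define MIRROR_IFINDEX	42	/* illustrative target device */

SEC("tc")
int mirror_example(struct __sk_buff *skb)
{
	/* Unlike bpf_redirect(), this clones and forwards immediately,
	 * so the original skb keeps flowing after the program returns.
	 */
	bpf_clone_redirect(skb, MIRROR_IFINDEX, 0 /* egress */);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
#endif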
2459
2460 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
2461 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
2462
2463 int skb_do_redirect(struct sk_buff *skb)
2464 {
2465         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2466         struct net *net = dev_net(skb->dev);
2467         struct net_device *dev;
2468         u32 flags = ri->flags;
2469
2470         dev = dev_get_by_index_rcu(net, ri->tgt_index);
2471         ri->tgt_index = 0;
2472         ri->flags = 0;
2473         if (unlikely(!dev))
2474                 goto out_drop;
2475         if (flags & BPF_F_PEER) {
2476                 const struct net_device_ops *ops = dev->netdev_ops;
2477
2478                 if (unlikely(!ops->ndo_get_peer_dev ||
2479                              !skb_at_tc_ingress(skb)))
2480                         goto out_drop;
2481                 dev = ops->ndo_get_peer_dev(dev);
2482                 if (unlikely(!dev ||
2483                              !is_skb_forwardable(dev, skb) ||
2484                              net_eq(net, dev_net(dev))))
2485                         goto out_drop;
2486                 skb->dev = dev;
2487                 return -EAGAIN;
2488         }
2489         return flags & BPF_F_NEIGH ?
2490                __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ?
2491                                     &ri->nh : NULL) :
2492                __bpf_redirect(skb, dev, flags);
2493 out_drop:
2494         kfree_skb(skb);
2495         return -EINVAL;
2496 }
2497
2498 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
2499 {
2500         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2501
2502         if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
2503                 return TC_ACT_SHOT;
2504
2505         ri->flags = flags;
2506         ri->tgt_index = ifindex;
2507
2508         return TC_ACT_REDIRECT;
2509 }
2510
2511 static const struct bpf_func_proto bpf_redirect_proto = {
2512         .func           = bpf_redirect,
2513         .gpl_only       = false,
2514         .ret_type       = RET_INTEGER,
2515         .arg1_type      = ARG_ANYTHING,
2516         .arg2_type      = ARG_ANYTHING,
2517 };
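/* A hypothetical BPF-side sketch (libbpf conventions assumed; the target
 * ifindex is illustrative): bpf_redirect() only records per-CPU redirect
 * state; the actual forwarding happens in skb_do_redirect() above once
 * the program returns TC_ACT_REDIRECT.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

#define TARGET_IFINDEX	42	/* illustrative target device */

SEC("tc")
int redirect_example(struct __sk_buff *skb)
{
	/* BPF_F_INGRESS steers into the target's ingress path;
	 * without it the skb is queued for egress transmission.
	 */
	return bpf_redirect(TARGET_IFINDEX, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";
#endif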
2518
2519 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
2520 {
2521         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2522
2523         if (unlikely(flags))
2524                 return TC_ACT_SHOT;
2525
2526         ri->flags = BPF_F_PEER;
2527         ri->tgt_index = ifindex;
2528
2529         return TC_ACT_REDIRECT;
2530 }
2531
2532 static const struct bpf_func_proto bpf_redirect_peer_proto = {
2533         .func           = bpf_redirect_peer,
2534         .gpl_only       = false,
2535         .ret_type       = RET_INTEGER,
2536         .arg1_type      = ARG_ANYTHING,
2537         .arg2_type      = ARG_ANYTHING,
2538 };
2539
2540 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
2541            int, plen, u64, flags)
2542 {
2543         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2544
2545         if (unlikely((plen && plen < sizeof(*params)) || flags))
2546                 return TC_ACT_SHOT;
2547
2548         ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0);
2549         ri->tgt_index = ifindex;
2550
2551         BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params));
2552         if (plen)
2553                 memcpy(&ri->nh, params, sizeof(ri->nh));
2554
2555         return TC_ACT_REDIRECT;
2556 }
2557
2558 static const struct bpf_func_proto bpf_redirect_neigh_proto = {
2559         .func           = bpf_redirect_neigh,
2560         .gpl_only       = false,
2561         .ret_type       = RET_INTEGER,
2562         .arg1_type      = ARG_ANYTHING,
2563         .arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
2564         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
2565         .arg4_type      = ARG_ANYTHING,
2566 };
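/* A hypothetical BPF-side sketch (libbpf conventions assumed; the
 * ifindex and nexthop address are illustrative): redirect via the
 * neighbor subsystem with an explicit IPv4 nexthop. With plen == 0 the
 * kernel would instead resolve the nexthop from its own route lookup.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2
#endif

#define TARGET_IFINDEX	42	/* illustrative egress device */

SEC("tc")
int redirect_neigh_example(struct __sk_buff *skb)
{
	struct bpf_redir_neigh nh = {
		.nh_family = AF_INET,
		.ipv4_nh   = bpf_htonl(0x0a000001),	/* 10.0.0.1 */
	};

	return bpf_redirect_neigh(TARGET_IFINDEX, &nh, sizeof(nh), 0);
}

char _license[] SEC("license") = "GPL";
#endif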
2567
2568 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
2569 {
2570         msg->apply_bytes = bytes;
2571         return 0;
2572 }
2573
2574 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2575         .func           = bpf_msg_apply_bytes,
2576         .gpl_only       = false,
2577         .ret_type       = RET_INTEGER,
2578         .arg1_type      = ARG_PTR_TO_CTX,
2579         .arg2_type      = ARG_ANYTHING,
2580 };
2581
2582 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
2583 {
2584         msg->cork_bytes = bytes;
2585         return 0;
2586 }
2587
2588 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2589         .func           = bpf_msg_cork_bytes,
2590         .gpl_only       = false,
2591         .ret_type       = RET_INTEGER,
2592         .arg1_type      = ARG_PTR_TO_CTX,
2593         .arg2_type      = ARG_ANYTHING,
2594 };
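/* A hypothetical sk_msg program sketch (libbpf conventions assumed; the
 * byte counts are illustrative) using the two helpers above: apply the
 * verdict to a fixed number of bytes and cork until enough data has
 * been queued.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_example(struct sk_msg_md *msg)
{
	/* Apply the verdict below to the next 1024 bytes, and wait
	 * (cork) until at least 512 bytes are queued before this
	 * program is run again.
	 */
	bpf_msg_apply_bytes(msg, 1024);
	bpf_msg_cork_bytes(msg, 512);
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
#endif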
2595
2596 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
2597            u32, end, u64, flags)
2598 {
2599         u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
2600         u32 first_sge, last_sge, i, shift, bytes_sg_total;
2601         struct scatterlist *sge;
2602         u8 *raw, *to, *from;
2603         struct page *page;
2604
2605         if (unlikely(flags || end <= start))
2606                 return -EINVAL;
2607
2608         /* First find the starting scatterlist element */
2609         i = msg->sg.start;
2610         do {
2611                 offset += len;
2612                 len = sk_msg_elem(msg, i)->length;
2613                 if (start < offset + len)
2614                         break;
2615                 sk_msg_iter_var_next(i);
2616         } while (i != msg->sg.end);
2617
2618         if (unlikely(start >= offset + len))
2619                 return -EINVAL;
2620
2621         first_sge = i;
2622         /* The start may point into the middle of the sg element, so we
2623          * also need to account for the headroom (start - offset) in front.
2624          */
2625         bytes_sg_total = start - offset + bytes;
2626         if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
2627                 goto out;
2628
2629         /* At this point we need to linearize multiple scatterlist
2630          * elements or a single shared page. Either way we need to
2631          * copy into a linear buffer exclusively owned by BPF. Then
2632          * place the buffer in the scatterlist and fixup the original
2633          * entries by removing the entries now in the linear buffer
2634          * and shifting the remaining entries. For now we do not try
2635          * to copy partial entries to avoid complexity of running out
2636          * of sg_entry slots. The downside is reading a single byte
2637          * will copy the entire sg entry.
2638          */
2639         do {
2640                 copy += sk_msg_elem(msg, i)->length;
2641                 sk_msg_iter_var_next(i);
2642                 if (bytes_sg_total <= copy)
2643                         break;
2644         } while (i != msg->sg.end);
2645         last_sge = i;
2646
2647         if (unlikely(bytes_sg_total > copy))
2648                 return -EINVAL;
2649
2650         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2651                            get_order(copy));
2652         if (unlikely(!page))
2653                 return -ENOMEM;
2654
2655         raw = page_address(page);
2656         i = first_sge;
2657         do {
2658                 sge = sk_msg_elem(msg, i);
2659                 from = sg_virt(sge);
2660                 len = sge->length;
2661                 to = raw + poffset;
2662
2663                 memcpy(to, from, len);
2664                 poffset += len;
2665                 sge->length = 0;
2666                 put_page(sg_page(sge));
2667
2668                 sk_msg_iter_var_next(i);
2669         } while (i != last_sge);
2670
2671         sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
2672
2673         /* To repair the sg ring we need to shift entries. If we only
2674          * had a single entry though we can just replace it and
2675          * be done. Otherwise walk the ring and shift the entries.
2676          */
2677         WARN_ON_ONCE(last_sge == first_sge);
2678         shift = last_sge > first_sge ?
2679                 last_sge - first_sge - 1 :
2680                 NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
2681         if (!shift)
2682                 goto out;
2683
2684         i = first_sge;
2685         sk_msg_iter_var_next(i);
2686         do {
2687                 u32 move_from;
2688
2689                 if (i + shift >= NR_MSG_FRAG_IDS)
2690                         move_from = i + shift - NR_MSG_FRAG_IDS;
2691                 else
2692                         move_from = i + shift;
2693                 if (move_from == msg->sg.end)
2694                         break;
2695
2696                 msg->sg.data[i] = msg->sg.data[move_from];
2697                 msg->sg.data[move_from].length = 0;
2698                 msg->sg.data[move_from].page_link = 0;
2699                 msg->sg.data[move_from].offset = 0;
2700                 sk_msg_iter_var_next(i);
2701         } while (1);
2702
2703         msg->sg.end = msg->sg.end - shift > msg->sg.end ?
2704                       msg->sg.end - shift + NR_MSG_FRAG_IDS :
2705                       msg->sg.end - shift;
2706 out:
2707         msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
2708         msg->data_end = msg->data + bytes;
2709         return 0;
2710 }
2711
2712 static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2713         .func           = bpf_msg_pull_data,
2714         .gpl_only       = false,
2715         .ret_type       = RET_INTEGER,
2716         .arg1_type      = ARG_PTR_TO_CTX,
2717         .arg2_type      = ARG_ANYTHING,
2718         .arg3_type      = ARG_ANYTHING,
2719         .arg4_type      = ARG_ANYTHING,
2720 };
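/* A hypothetical sk_msg program sketch (libbpf conventions assumed; the
 * 8-byte application header is illustrative): msg->data/data_end
 * initially cover only the first sg entry, so linearize the wanted range
 * first and then access it directly.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_pull_example(struct sk_msg_md *msg)
{
	void *data, *data_end;

	/* Make bytes [0, 8) contiguous and directly accessible;
	 * this recomputes msg->data/data_end as described above.
	 */
	if (bpf_msg_pull_data(msg, 0, 8, 0) < 0)
		return SK_DROP;
	data = msg->data;
	data_end = msg->data_end;
	if (data + 8 > data_end)
		return SK_DROP;
	/* ... inspect the 8-byte application header here ... */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";
#endif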
2721
2722 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2723            u32, len, u64, flags)
2724 {
2725         struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
2726         u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
2727         u8 *raw, *to, *from;
2728         struct page *page;
2729
2730         if (unlikely(flags))
2731                 return -EINVAL;
2732
2733         /* First find the starting scatterlist element */
2734         i = msg->sg.start;
2735         do {
2736                 offset += l;
2737                 l = sk_msg_elem(msg, i)->length;
2738
2739                 if (start < offset + l)
2740                         break;
2741                 sk_msg_iter_var_next(i);
2742         } while (i != msg->sg.end);
2743
2744         if (start >= offset + l)
2745                 return -EINVAL;
2746
2747         space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2748
2749         /* If no space is available we will fall back to copy; we need
2750          * at least one scatterlist elem available to push data into
2751          * when start aligns with the beginning of an element, or two
2752          * when it falls inside an element. We handle the start equals
2753          * offset case specially because it is the common case for
2754          * inserting a header.
2755          */
2756         if (!space || (space == 1 && start != offset))
2757                 copy = msg->sg.data[i].length;
2758
2759         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2760                            get_order(copy + len));
2761         if (unlikely(!page))
2762                 return -ENOMEM;
2763
2764         if (copy) {
2765                 int front, back;
2766
2767                 raw = page_address(page);
2768
2769                 psge = sk_msg_elem(msg, i);
2770                 front = start - offset;
2771                 back = psge->length - front;
2772                 from = sg_virt(psge);
2773
2774                 if (front)
2775                         memcpy(raw, from, front);
2776
2777                 if (back) {
2778                         from += front;
2779                         to = raw + front + len;
2780
2781                         memcpy(to, from, back);
2782                 }
2783
2784                 put_page(sg_page(psge));
2785         } else if (start - offset) {
2786                 psge = sk_msg_elem(msg, i);
2787                 rsge = sk_msg_elem_cpy(msg, i);
2788
2789                 psge->length = start - offset;
2790                 rsge.length -= psge->length;
2791                 rsge.offset += start;
2792
2793                 sk_msg_iter_var_next(i);
2794                 sg_unmark_end(psge);
2795                 sg_unmark_end(&rsge);
2796                 sk_msg_iter_next(msg, end);
2797         }
2798
2799         /* Slot(s) to place newly allocated data */
2800         new = i;
2801
2802         /* Shift one or two slots as needed */
2803         if (!copy) {
2804                 sge = sk_msg_elem_cpy(msg, i);
2805
2806                 sk_msg_iter_var_next(i);
2807                 sg_unmark_end(&sge);
2808                 sk_msg_iter_next(msg, end);
2809
2810                 nsge = sk_msg_elem_cpy(msg, i);
2811                 if (rsge.length) {
2812                         sk_msg_iter_var_next(i);
2813                         nnsge = sk_msg_elem_cpy(msg, i);
2814                 }
2815
2816                 while (i != msg->sg.end) {
2817                         msg->sg.data[i] = sge;
2818                         sge = nsge;
2819                         sk_msg_iter_var_next(i);
2820                         if (rsge.length) {
2821                                 nsge = nnsge;
2822                                 nnsge = sk_msg_elem_cpy(msg, i);
2823                         } else {
2824                                 nsge = sk_msg_elem_cpy(msg, i);
2825                         }
2826                 }
2827         }
2828
2829         /* Place newly allocated data buffer */
2830         sk_mem_charge(msg->sk, len);
2831         msg->sg.size += len;
2832         __clear_bit(new, &msg->sg.copy);
2833         sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2834         if (rsge.length) {
2835                 get_page(sg_page(&rsge));
2836                 sk_msg_iter_var_next(new);
2837                 msg->sg.data[new] = rsge;
2838         }
2839
2840         sk_msg_compute_data_pointers(msg);
2841         return 0;
2842 }
2843
2844 static const struct bpf_func_proto bpf_msg_push_data_proto = {
2845         .func           = bpf_msg_push_data,
2846         .gpl_only       = false,
2847         .ret_type       = RET_INTEGER,
2848         .arg1_type      = ARG_PTR_TO_CTX,
2849         .arg2_type      = ARG_ANYTHING,
2850         .arg3_type      = ARG_ANYTHING,
2851         .arg4_type      = ARG_ANYTHING,
2852 };
2853
2854 static void sk_msg_shift_left(struct sk_msg *msg, int i)
2855 {
2856         int prev;
2857
2858         do {
2859                 prev = i;
2860                 sk_msg_iter_var_next(i);
2861                 msg->sg.data[prev] = msg->sg.data[i];
2862         } while (i != msg->sg.end);
2863
2864         sk_msg_iter_prev(msg, end);
2865 }
2866
2867 static void sk_msg_shift_right(struct sk_msg *msg, int i)
2868 {
2869         struct scatterlist tmp, sge;
2870
2871         sk_msg_iter_next(msg, end);
2872         sge = sk_msg_elem_cpy(msg, i);
2873         sk_msg_iter_var_next(i);
2874         tmp = sk_msg_elem_cpy(msg, i);
2875
2876         while (i != msg->sg.end) {
2877                 msg->sg.data[i] = sge;
2878                 sk_msg_iter_var_next(i);
2879                 sge = tmp;
2880                 tmp = sk_msg_elem_cpy(msg, i);
2881         }
2882 }
2883
2884 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2885            u32, len, u64, flags)
2886 {
2887         u32 i = 0, l = 0, space, offset = 0;
2888         u64 last = start + len;
2889         int pop;
2890
2891         if (unlikely(flags))
2892                 return -EINVAL;
2893
2894         /* First find the starting scatterlist element */
2895         i = msg->sg.start;
2896         do {
2897                 offset += l;
2898                 l = sk_msg_elem(msg, i)->length;
2899
2900                 if (start < offset + l)
2901                         break;
2902                 sk_msg_iter_var_next(i);
2903         } while (i != msg->sg.end);
2904
2905         /* Bounds checks: start and pop must be inside message */
2906         if (start >= offset + l || last >= msg->sg.size)
2907                 return -EINVAL;
2908
2909         space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2910
2911         pop = len;
2912         /* --------------| offset
2913          * -| start      |-------- len -------|
2914          *
2915          *  |----- a ----|-------- pop -------|----- b ----|
2916          *  |______________________________________________| length
2917          *
2918          *
2919          * a:   region at front of scatter element to save
2920          * b:   region at back of scatter element to save when length > a + pop
2921          * pop: region to pop from element, same as input 'pop'; it will be
2922          *      decremented below per iteration.
2923          *
2924          * Two top-level cases to handle when start != offset: first, b is
2925          * non-zero; second, b is zero, corresponding to a pop that spans
2926          * more than one element.
2927          *
2928          * Then, if b is non-zero AND there is no space, allocate space and
2929          * compact the a and b regions into a page. If there is space, shift
2930          * the ring to the right, freeing the next element in the ring to
2931          * place b, leaving a untouched except to reduce its length.
2932          */
2933         if (start != offset) {
2934                 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
2935                 int a = start;
2936                 int b = sge->length - pop - a;
2937
2938                 sk_msg_iter_var_next(i);
2939
2940                 if (pop < sge->length - a) {
2941                         if (space) {
2942                                 sge->length = a;
2943                                 sk_msg_shift_right(msg, i);
2944                                 nsge = sk_msg_elem(msg, i);
2945                                 get_page(sg_page(sge));
2946                                 sg_set_page(nsge,
2947                                             sg_page(sge),
2948                                             b, sge->offset + pop + a);
2949                         } else {
2950                                 struct page *page, *orig;
2951                                 u8 *to, *from;
2952
2953                                 page = alloc_pages(__GFP_NOWARN |
2954                                                    __GFP_COMP   | GFP_ATOMIC,
2955                                                    get_order(a + b));
2956                                 if (unlikely(!page))
2957                                         return -ENOMEM;
2958
2959                                 sge->length = a;
2960                                 orig = sg_page(sge);
2961                                 from = sg_virt(sge);
2962                                 to = page_address(page);
2963                                 memcpy(to, from, a);
2964                                 memcpy(to + a, from + a + pop, b);
2965                                 sg_set_page(sge, page, a + b, 0);
2966                                 put_page(orig);
2967                         }
2968                         pop = 0;
2969                 } else if (pop >= sge->length - a) {
2970                         pop -= (sge->length - a);
2971                         sge->length = a;
2972                 }
2973         }
2974
2975         /* From above, the current layout _must_ be as follows:
2976          *
2977          * -| offset
2978          * -| start
2979          *
2980          *  |---- pop ---|---------------- b ------------|
2981          *  |____________________________________________| length
2982          *
2983          * Offset and start of the current msg elem are equal because in the
2984          * previous case we handled offset != start and either consumed the
2985          * entire element and advanced to the next element OR pop == 0.
2986          *
2987          * Two cases to handle here: first, pop is less than the length,
2988          * leaving some remainder b above; simply adjust the element's
2989          * layout in this case. Or pop >= the length of the element, so that
2990          * b = 0; in this case advance to the next element, decrementing pop.
2991          */
2992         while (pop) {
2993                 struct scatterlist *sge = sk_msg_elem(msg, i);
2994
2995                 if (pop < sge->length) {
2996                         sge->length -= pop;
2997                         sge->offset += pop;
2998                         pop = 0;
2999                 } else {
3000                         pop -= sge->length;
3001                         sk_msg_shift_left(msg, i);
3002                 }
3003                 sk_msg_iter_var_next(i);
3004         }
3005
3006         sk_mem_uncharge(msg->sk, len - pop);
3007         msg->sg.size -= (len - pop);
3008         sk_msg_compute_data_pointers(msg);
3009         return 0;
3010 }
3011
3012 static const struct bpf_func_proto bpf_msg_pop_data_proto = {
3013         .func           = bpf_msg_pop_data,
3014         .gpl_only       = false,
3015         .ret_type       = RET_INTEGER,
3016         .arg1_type      = ARG_PTR_TO_CTX,
3017         .arg2_type      = ARG_ANYTHING,
3018         .arg3_type      = ARG_ANYTHING,
3019         .arg4_type      = ARG_ANYTHING,
3020 };
3021
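/* Editor's note: an illustrative sketch (not part of filter.c) of the
 * consumer side: an sk_msg program stripping the same hypothetical 4-byte
 * header with bpf_msg_pop_data(). Per the bounds check above, start + len
 * must lie inside the message. Assumes <linux/bpf.h> and
 * <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int pop_app_hdr(struct sk_msg_md *msg)
{
        if (msg->size < 4)
                return SK_DROP;
        if (bpf_msg_pop_data(msg, 0, 4, 0))    /* flags must be 0 */
                return SK_DROP;
        return SK_PASS;
}
#endif
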
3022 #ifdef CONFIG_CGROUP_NET_CLASSID
3023 BPF_CALL_0(bpf_get_cgroup_classid_curr)
3024 {
3025         return __task_get_classid(current);
3026 }
3027
3028 static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
3029         .func           = bpf_get_cgroup_classid_curr,
3030         .gpl_only       = false,
3031         .ret_type       = RET_INTEGER,
3032 };
3033
3034 BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
3035 {
3036         struct sock *sk = skb_to_full_sk(skb);
3037
3038         if (!sk || !sk_fullsock(sk))
3039                 return 0;
3040
3041         return sock_cgroup_classid(&sk->sk_cgrp_data);
3042 }
3043
3044 static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
3045         .func           = bpf_skb_cgroup_classid,
3046         .gpl_only       = false,
3047         .ret_type       = RET_INTEGER,
3048         .arg1_type      = ARG_PTR_TO_CTX,
3049 };
3050 #endif
3051
3052 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
3053 {
3054         return task_get_classid(skb);
3055 }
3056
3057 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
3058         .func           = bpf_get_cgroup_classid,
3059         .gpl_only       = false,
3060         .ret_type       = RET_INTEGER,
3061         .arg1_type      = ARG_PTR_TO_CTX,
3062 };
3063
3064 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
3065 {
3066         return dst_tclassid(skb);
3067 }
3068
3069 static const struct bpf_func_proto bpf_get_route_realm_proto = {
3070         .func           = bpf_get_route_realm,
3071         .gpl_only       = false,
3072         .ret_type       = RET_INTEGER,
3073         .arg1_type      = ARG_PTR_TO_CTX,
3074 };
3075
3076 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
3077 {
3078         /* If skb_clear_hash() was called due to mangling, we can
3079          * trigger a SW recalculation here. Later accesses to the hash
3080          * can then use the inline skb->hash via the context directly
3081          * instead of calling this helper again.
3082          */
3083         return skb_get_hash(skb);
3084 }
3085
3086 static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
3087         .func           = bpf_get_hash_recalc,
3088         .gpl_only       = false,
3089         .ret_type       = RET_INTEGER,
3090         .arg1_type      = ARG_PTR_TO_CTX,
3091 };
3092
3093 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
3094 {
3095         /* After all direct packet writes, this can be used once to
3096          * trigger a lazy recalc on the next skb_get_hash() invocation.
3097          */
3098         skb_clear_hash(skb);
3099         return 0;
3100 }
3101
3102 static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
3103         .func           = bpf_set_hash_invalid,
3104         .gpl_only       = false,
3105         .ret_type       = RET_INTEGER,
3106         .arg1_type      = ARG_PTR_TO_CTX,
3107 };
3108
3109 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
3110 {
3111         /* Set the user-specified hash as L4(+), so that it gets
3112          * returned on the skb_get_hash() call unless the BPF prog
3113          * later on triggers a skb_clear_hash().
3114          */
3115         __skb_set_sw_hash(skb, hash, true);
3116         return 0;
3117 }
3118
3119 static const struct bpf_func_proto bpf_set_hash_proto = {
3120         .func           = bpf_set_hash,
3121         .gpl_only       = false,
3122         .ret_type       = RET_INTEGER,
3123         .arg1_type      = ARG_PTR_TO_CTX,
3124         .arg2_type      = ARG_ANYTHING,
3125 };
3126
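/* Editor's note: an illustrative tc-BPF sketch (not part of filter.c)
 * showing how the three hash helpers above compose. Assumes <linux/bpf.h>,
 * <linux/pkt_cls.h> and <bpf/bpf_helpers.h>; the chosen hash value is
 * arbitrary.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int fix_hash(struct __sk_buff *skb)
{
        /* After direct packet writes, invalidate the stale hash ... */
        bpf_set_hash_invalid(skb);
        /* ... and either recompute it in software right away ... */
        if (!bpf_get_hash_recalc(skb))
                /* ... or install a caller-chosen L4 hash instead. */
                bpf_set_hash(skb, 0x12345678);
        return TC_ACT_OK;
}
#endif
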
3127 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
3128            u16, vlan_tci)
3129 {
3130         int ret;
3131
3132         if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
3133                      vlan_proto != htons(ETH_P_8021AD)))
3134                 vlan_proto = htons(ETH_P_8021Q);
3135
3136         bpf_push_mac_rcsum(skb);
3137         ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
3138         bpf_pull_mac_rcsum(skb);
3139
3140         bpf_compute_data_pointers(skb);
3141         return ret;
3142 }
3143
3144 static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
3145         .func           = bpf_skb_vlan_push,
3146         .gpl_only       = false,
3147         .ret_type       = RET_INTEGER,
3148         .arg1_type      = ARG_PTR_TO_CTX,
3149         .arg2_type      = ARG_ANYTHING,
3150         .arg3_type      = ARG_ANYTHING,
3151 };
3152
3153 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
3154 {
3155         int ret;
3156
3157         bpf_push_mac_rcsum(skb);
3158         ret = skb_vlan_pop(skb);
3159         bpf_pull_mac_rcsum(skb);
3160
3161         bpf_compute_data_pointers(skb);
3162         return ret;
3163 }
3164
3165 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
3166         .func           = bpf_skb_vlan_pop,
3167         .gpl_only       = false,
3168         .ret_type       = RET_INTEGER,
3169         .arg1_type      = ARG_PTR_TO_CTX,
3170 };
3171
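/* Editor's note: an illustrative tc-BPF sketch (not part of filter.c)
 * retagging traffic with the two VLAN helpers above. VID 42 is arbitrary;
 * assumes <linux/bpf.h>, <linux/if_ether.h>, <linux/pkt_cls.h> and
 * libbpf's <bpf/bpf_helpers.h> / <bpf/bpf_endian.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int vlan_retag(struct __sk_buff *skb)
{
        /* Strip any existing tag, then re-tag. Note that an unsupported
         * vlan_proto is silently coerced to 802.1Q by the helper above.
         */
        if (skb->vlan_present && bpf_skb_vlan_pop(skb))
                return TC_ACT_SHOT;
        if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42))
                return TC_ACT_SHOT;
        return TC_ACT_OK;
}
#endif
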
3172 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
3173 {
3174         /* Caller already did skb_cow() with len as headroom,
3175          * so no need to do it here.
3176          */
3177         skb_push(skb, len);
3178         memmove(skb->data, skb->data + len, off);
3179         memset(skb->data + off, 0, len);
3180
3181         /* No skb_postpush_rcsum(skb, skb->data + off, len)
3182          * needed here as it does not change the skb->csum
3183          * result for checksum complete when summing over
3184          * zeroed blocks.
3185          */
3186         return 0;
3187 }
3188
3189 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
3190 {
3191         /* skb_ensure_writable() is not needed here, as we're
3192          * already working on an uncloned skb.
3193          */
3194         if (unlikely(!pskb_may_pull(skb, off + len)))
3195                 return -ENOMEM;
3196
3197         skb_postpull_rcsum(skb, skb->data + off, len);
3198         memmove(skb->data + len, skb->data, off);
3199         __skb_pull(skb, len);
3200
3201         return 0;
3202 }
3203
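/* Editor's note: for intuition only, a plain userspace rendition (not part
 * of filter.c) of the push case above: 'off' bytes of existing headers are
 * moved down to open a zeroed 'len'-byte gap right behind them.
 */
#if 0
#include <string.h>

static void generic_push(unsigned char *data, unsigned int off,
                         unsigned int len)
{
        /* 'data' already points 'len' bytes below the old start, as after
         * skb_push(); shift the headers down and clear the gap.
         */
        memmove(data, data + len, off);
        memset(data + off, 0, len);
}
#endif
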
3204 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
3205 {
3206         bool trans_same = skb->transport_header == skb->network_header;
3207         int ret;
3208
3209         /* There's no need for __skb_push()/__skb_pull() pair to
3210          * get to the start of the mac header as we're guaranteed
3211          * to always start from here under eBPF.
3212          */
3213         ret = bpf_skb_generic_push(skb, off, len);
3214         if (likely(!ret)) {
3215                 skb->mac_header -= len;
3216                 skb->network_header -= len;
3217                 if (trans_same)
3218                         skb->transport_header = skb->network_header;
3219         }
3220
3221         return ret;
3222 }
3223
3224 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
3225 {
3226         bool trans_same = skb->transport_header == skb->network_header;
3227         int ret;
3228
3229         /* Same here, __skb_push()/__skb_pull() pair not needed. */
3230         ret = bpf_skb_generic_pop(skb, off, len);
3231         if (likely(!ret)) {
3232                 skb->mac_header += len;
3233                 skb->network_header += len;
3234                 if (trans_same)
3235                         skb->transport_header = skb->network_header;
3236         }
3237
3238         return ret;
3239 }
3240
3241 static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
3242 {
3243         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
3244         u32 off = skb_mac_header_len(skb);
3245         int ret;
3246
3247         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
3248                 return -ENOTSUPP;
3249
3250         ret = skb_cow(skb, len_diff);
3251         if (unlikely(ret < 0))
3252                 return ret;
3253
3254         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3255         if (unlikely(ret < 0))
3256                 return ret;
3257
3258         if (skb_is_gso(skb)) {
3259                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3260
3261                 /* SKB_GSO_TCPV4 needs to be changed into
3262                  * SKB_GSO_TCPV6.
3263                  */
3264                 if (shinfo->gso_type & SKB_GSO_TCPV4) {
3265                         shinfo->gso_type &= ~SKB_GSO_TCPV4;
3266                         shinfo->gso_type |=  SKB_GSO_TCPV6;
3267                 }
3268
3269                 /* Due to IPv6 header, MSS needs to be downgraded. */
3270                 skb_decrease_gso_size(shinfo, len_diff);
3271                 /* Header must be checked, and gso_segs recomputed. */
3272                 shinfo->gso_type |= SKB_GSO_DODGY;
3273                 shinfo->gso_segs = 0;
3274         }
3275
3276         skb->protocol = htons(ETH_P_IPV6);
3277         skb_clear_hash(skb);
3278
3279         return 0;
3280 }
3281
3282 static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
3283 {
3284         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
3285         u32 off = skb_mac_header_len(skb);
3286         int ret;
3287
3288         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
3289                 return -ENOTSUPP;
3290
3291         ret = skb_unclone(skb, GFP_ATOMIC);
3292         if (unlikely(ret < 0))
3293                 return ret;
3294
3295         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3296         if (unlikely(ret < 0))
3297                 return ret;
3298
3299         if (skb_is_gso(skb)) {
3300                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3301
3302                 /* SKB_GSO_TCPV6 needs to be changed into
3303                  * SKB_GSO_TCPV4.
3304                  */
3305                 if (shinfo->gso_type & SKB_GSO_TCPV6) {
3306                         shinfo->gso_type &= ~SKB_GSO_TCPV6;
3307                         shinfo->gso_type |=  SKB_GSO_TCPV4;
3308                 }
3309
3310                 /* Due to IPv4 header, MSS can be upgraded. */
3311                 skb_increase_gso_size(shinfo, len_diff);
3312                 /* Header must be checked, and gso_segs recomputed. */
3313                 shinfo->gso_type |= SKB_GSO_DODGY;
3314                 shinfo->gso_segs = 0;
3315         }
3316
3317         skb->protocol = htons(ETH_P_IP);
3318         skb_clear_hash(skb);
3319
3320         return 0;
3321 }
3322
3323 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
3324 {
3325         __be16 from_proto = skb->protocol;
3326
3327         if (from_proto == htons(ETH_P_IP) &&
3328               to_proto == htons(ETH_P_IPV6))
3329                 return bpf_skb_proto_4_to_6(skb);
3330
3331         if (from_proto == htons(ETH_P_IPV6) &&
3332               to_proto == htons(ETH_P_IP))
3333                 return bpf_skb_proto_6_to_4(skb);
3334
3335         return -ENOTSUPP;
3336 }
3337
3338 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
3339            u64, flags)
3340 {
3341         int ret;
3342
3343         if (unlikely(flags))
3344                 return -EINVAL;
3345
3346         /* The general idea is that this helper does the basic groundwork
3347          * needed for changing the protocol, and the eBPF program fills the
3348          * rest in through bpf_skb_store_bytes(), bpf_lX_csum_replace()
3349          * and other helpers, rather than passing a raw buffer here.
3350          *
3351          * The rationale is to keep this minimal and without a need to
3352          * deal with raw packet data. E.g. even if we passed buffers
3353          * here, the program would still need to call the
3354          * bpf_lX_csum_replace() helpers anyway. Plus, this way we also
3355          * keep a separation of concerns, since e.g. bpf_skb_store_bytes()
3356          * should only take care of stores.
3357          *
3358          * Currently, additional options and extension header space are
3359          * not supported, but the flags argument is reserved so we can
3360          * adapt that later. For offloads, we mark the packet as dodgy, so
3361          * that headers need to be verified first.
3362          */
3363         ret = bpf_skb_proto_xlat(skb, proto);
3364         bpf_compute_data_pointers(skb);
3365         return ret;
3366 }
3367
3368 static const struct bpf_func_proto bpf_skb_change_proto_proto = {
3369         .func           = bpf_skb_change_proto,
3370         .gpl_only       = false,
3371         .ret_type       = RET_INTEGER,
3372         .arg1_type      = ARG_PTR_TO_CTX,
3373         .arg2_type      = ARG_ANYTHING,
3374         .arg3_type      = ARG_ANYTHING,
3375 };
3376
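/* Editor's note: an illustrative NAT64-style tc-BPF sketch (not part of
 * filter.c). As the comment in the helper explains, the program itself must
 * rewrite the L3 header and checksums after the translation; that part is
 * elided here. Assumes <linux/bpf.h>, <linux/if_ether.h>,
 * <linux/pkt_cls.h>, <bpf/bpf_helpers.h> and <bpf/bpf_endian.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int v4_to_v6(struct __sk_buff *skb)
{
        if (skb->protocol != bpf_htons(ETH_P_IP))
                return TC_ACT_OK;
        /* Makes 20 bytes of room and flips skb->protocol / GSO type. */
        if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
                return TC_ACT_SHOT;
        /* ... bpf_skb_store_bytes() of a fully formed ipv6hdr here ... */
        return TC_ACT_OK;
}
#endif
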
3377 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
3378 {
3379         /* We only allow a restricted subset to be changed for now. */
3380         if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
3381                      !skb_pkt_type_ok(pkt_type)))
3382                 return -EINVAL;
3383
3384         skb->pkt_type = pkt_type;
3385         return 0;
3386 }
3387
3388 static const struct bpf_func_proto bpf_skb_change_type_proto = {
3389         .func           = bpf_skb_change_type,
3390         .gpl_only       = false,
3391         .ret_type       = RET_INTEGER,
3392         .arg1_type      = ARG_PTR_TO_CTX,
3393         .arg2_type      = ARG_ANYTHING,
3394 };
3395
3396 static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
3397 {
3398         switch (skb->protocol) {
3399         case htons(ETH_P_IP):
3400                 return sizeof(struct iphdr);
3401         case htons(ETH_P_IPV6):
3402                 return sizeof(struct ipv6hdr);
3403         default:
3404                 return ~0U;
3405         }
3406 }
3407
3408 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK    (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3409                                          BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3410
3411 #define BPF_F_ADJ_ROOM_MASK             (BPF_F_ADJ_ROOM_FIXED_GSO | \
3412                                          BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3413                                          BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
3414                                          BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
3415                                          BPF_F_ADJ_ROOM_ENCAP_L2( \
3416                                           BPF_ADJ_ROOM_ENCAP_L2_MASK))
3417
3418 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3419                             u64 flags)
3420 {
3421         u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
3422         bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
3423         u16 mac_len = 0, inner_net = 0, inner_trans = 0;
3424         unsigned int gso_type = SKB_GSO_DODGY;
3425         int ret;
3426
3427         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3428                 /* udp gso_size delineates datagrams, only allow if fixed */
3429                 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3430                     !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3431                         return -ENOTSUPP;
3432         }
3433
3434         ret = skb_cow_head(skb, len_diff);
3435         if (unlikely(ret < 0))
3436                 return ret;
3437
3438         if (encap) {
3439                 if (skb->protocol != htons(ETH_P_IP) &&
3440                     skb->protocol != htons(ETH_P_IPV6))
3441                         return -ENOTSUPP;
3442
3443                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3444                     flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3445                         return -EINVAL;
3446
3447                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3448                     flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3449                         return -EINVAL;
3450
3451                 if (skb->encapsulation)
3452                         return -EALREADY;
3453
3454                 mac_len = skb->network_header - skb->mac_header;
3455                 inner_net = skb->network_header;
3456                 if (inner_mac_len > len_diff)
3457                         return -EINVAL;
3458                 inner_trans = skb->transport_header;
3459         }
3460
3461         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3462         if (unlikely(ret < 0))
3463                 return ret;
3464
3465         if (encap) {
3466                 skb->inner_mac_header = inner_net - inner_mac_len;
3467                 skb->inner_network_header = inner_net;
3468                 skb->inner_transport_header = inner_trans;
3469                 skb_set_inner_protocol(skb, skb->protocol);
3470
3471                 skb->encapsulation = 1;
3472                 skb_set_network_header(skb, mac_len);
3473
3474                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3475                         gso_type |= SKB_GSO_UDP_TUNNEL;
3476                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3477                         gso_type |= SKB_GSO_GRE;
3478                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3479                         gso_type |= SKB_GSO_IPXIP6;
3480                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3481                         gso_type |= SKB_GSO_IPXIP4;
3482
3483                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3484                     flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3485                         int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3486                                         sizeof(struct ipv6hdr) :
3487                                         sizeof(struct iphdr);
3488
3489                         skb_set_transport_header(skb, mac_len + nh_len);
3490                 }
3491
3492                 /* Match skb->protocol to new outer l3 protocol */
3493                 if (skb->protocol == htons(ETH_P_IP) &&
3494                     flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3495                         skb->protocol = htons(ETH_P_IPV6);
3496                 else if (skb->protocol == htons(ETH_P_IPV6) &&
3497                          flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3498                         skb->protocol = htons(ETH_P_IP);
3499         }
3500
3501         if (skb_is_gso(skb)) {
3502                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3503
3504                 /* Due to header growth, MSS needs to be downgraded. */
3505                 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3506                         skb_decrease_gso_size(shinfo, len_diff);
3507
3508                 /* Header must be checked, and gso_segs recomputed. */
3509                 shinfo->gso_type |= gso_type;
3510                 shinfo->gso_segs = 0;
3511         }
3512
3513         return 0;
3514 }
3515
3516 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3517                               u64 flags)
3518 {
3519         int ret;
3520
3521         if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
3522                                BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
3523                 return -EINVAL;
3524
3525         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3526                 /* udp gso_size delineates datagrams, only allow if fixed */
3527                 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3528                     !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3529                         return -ENOTSUPP;
3530         }
3531
3532         ret = skb_unclone(skb, GFP_ATOMIC);
3533         if (unlikely(ret < 0))
3534                 return ret;
3535
3536         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3537         if (unlikely(ret < 0))
3538                 return ret;
3539
3540         if (skb_is_gso(skb)) {
3541                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3542
3543                 /* Due to header shrink, MSS can be upgraded. */
3544                 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3545                         skb_increase_gso_size(shinfo, len_diff);
3546
3547                 /* Header must be checked, and gso_segs recomputed. */
3548                 shinfo->gso_type |= SKB_GSO_DODGY;
3549                 shinfo->gso_segs = 0;
3550         }
3551
3552         return 0;
3553 }
3554
3555 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
3556 {
3557         return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
3558                           SKB_MAX_ALLOC;
3559 }
3560
3561 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3562            u32, mode, u64, flags)
3563 {
3564         u32 len_diff_abs = abs(len_diff);
3565         bool shrink = len_diff < 0;
3566         int ret = 0;
3567
3568         if (unlikely(flags || mode))
3569                 return -EINVAL;
3570         if (unlikely(len_diff_abs > 0xfffU))
3571                 return -EFAULT;
3572
3573         if (!shrink) {
3574                 ret = skb_cow(skb, len_diff);
3575                 if (unlikely(ret < 0))
3576                         return ret;
3577                 __skb_push(skb, len_diff_abs);
3578                 memset(skb->data, 0, len_diff_abs);
3579         } else {
3580                 if (unlikely(!pskb_may_pull(skb, len_diff_abs)))
3581                         return -ENOMEM;
3582                 __skb_pull(skb, len_diff_abs);
3583         }
3584         bpf_compute_data_end_sk_skb(skb);
3585         if (tls_sw_has_ctx_rx(skb->sk)) {
3586                 struct strp_msg *rxm = strp_msg(skb);
3587
3588                 rxm->full_len += len_diff;
3589         }
3590         return ret;
3591 }
3592
3593 static const struct bpf_func_proto sk_skb_adjust_room_proto = {
3594         .func           = sk_skb_adjust_room,
3595         .gpl_only       = false,
3596         .ret_type       = RET_INTEGER,
3597         .arg1_type      = ARG_PTR_TO_CTX,
3598         .arg2_type      = ARG_ANYTHING,
3599         .arg3_type      = ARG_ANYTHING,
3600         .arg4_type      = ARG_ANYTHING,
3601 };
3602
3603 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3604            u32, mode, u64, flags)
3605 {
3606         u32 len_cur, len_diff_abs = abs(len_diff);
3607         u32 len_min = bpf_skb_net_base_len(skb);
3608         u32 len_max = __bpf_skb_max_len(skb);
3609         __be16 proto = skb->protocol;
3610         bool shrink = len_diff < 0;
3611         u32 off;
3612         int ret;
3613
3614         if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
3615                                BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
3616                 return -EINVAL;
3617         if (unlikely(len_diff_abs > 0xfffU))
3618                 return -EFAULT;
3619         if (unlikely(proto != htons(ETH_P_IP) &&
3620                      proto != htons(ETH_P_IPV6)))
3621                 return -ENOTSUPP;
3622
3623         off = skb_mac_header_len(skb);
3624         switch (mode) {
3625         case BPF_ADJ_ROOM_NET:
3626                 off += bpf_skb_net_base_len(skb);
3627                 break;
3628         case BPF_ADJ_ROOM_MAC:
3629                 break;
3630         default:
3631                 return -ENOTSUPP;
3632         }
3633
3634         len_cur = skb->len - skb_network_offset(skb);
3635         if ((shrink && (len_diff_abs >= len_cur ||
3636                         len_cur - len_diff_abs < len_min)) ||
3637             (!shrink && (skb->len + len_diff_abs > len_max &&
3638                          !skb_is_gso(skb))))
3639                 return -ENOTSUPP;
3640
3641         ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3642                        bpf_skb_net_grow(skb, off, len_diff_abs, flags);
3643         if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
3644                 __skb_reset_checksum_unnecessary(skb);
3645
3646         bpf_compute_data_pointers(skb);
3647         return ret;
3648 }
3649
3650 static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3651         .func           = bpf_skb_adjust_room,
3652         .gpl_only       = false,
3653         .ret_type       = RET_INTEGER,
3654         .arg1_type      = ARG_PTR_TO_CTX,
3655         .arg2_type      = ARG_ANYTHING,
3656         .arg3_type      = ARG_ANYTHING,
3657         .arg4_type      = ARG_ANYTHING,
3658 };
3659
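/* Editor's note: an illustrative GRE encapsulation sketch (not part of
 * filter.c) for the BPF_F_ADJ_ROOM_ENCAP_* flags handled above; writing the
 * actual outer headers is elided. Assumes <linux/bpf.h>, <linux/ip.h>,
 * <linux/pkt_cls.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int encap_gre4(struct __sk_buff *skb)
{
        __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
                      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
        __s32 room = sizeof(struct iphdr) + 4;  /* outer IPv4 + base GRE */

        /* Grow at the mac layer; the kernel fixes up the inner header
         * offsets and GSO type, the program then stores the outer headers.
         */
        if (bpf_skb_adjust_room(skb, room, BPF_ADJ_ROOM_MAC, flags))
                return TC_ACT_SHOT;
        /* ... write outer iphdr + GRE header via bpf_skb_store_bytes() ... */
        return TC_ACT_OK;
}
#endif
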
3660 static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3661 {
3662         u32 min_len = skb_network_offset(skb);
3663
3664         if (skb_transport_header_was_set(skb))
3665                 min_len = skb_transport_offset(skb);
3666         if (skb->ip_summed == CHECKSUM_PARTIAL)
3667                 min_len = skb_checksum_start_offset(skb) +
3668                           skb->csum_offset + sizeof(__sum16);
3669         return min_len;
3670 }
3671
3672 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3673 {
3674         unsigned int old_len = skb->len;
3675         int ret;
3676
3677         ret = __skb_grow_rcsum(skb, new_len);
3678         if (!ret)
3679                 memset(skb->data + old_len, 0, new_len - old_len);
3680         return ret;
3681 }
3682
3683 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3684 {
3685         return __skb_trim_rcsum(skb, new_len);
3686 }
3687
3688 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3689                                         u64 flags)
3690 {
3691         u32 max_len = __bpf_skb_max_len(skb);
3692         u32 min_len = __bpf_skb_min_len(skb);
3693         int ret;
3694
3695         if (unlikely(flags || new_len > max_len || new_len < min_len))
3696                 return -EINVAL;
3697         if (skb->encapsulation)
3698                 return -ENOTSUPP;
3699
3700         /* The basic idea of this helper is that it's performing the
3701          * needed work to either grow or trim an skb, and eBPF program
3702          * rewrites the rest via helpers like bpf_skb_store_bytes(),
3703          * bpf_lX_csum_replace() and others rather than passing a raw
3704          * buffer here. This one is a slow path helper and intended
3705          * for replies with control messages.
3706          *
3707          * Like in bpf_skb_change_proto(), we want to keep this rather
3708          * minimal and without protocol specifics so that we are able
3709          * to separate concerns: bpf_skb_store_bytes() should be the
3710          * only one responsible for writing buffers.
3711          *
3712          * It's really expected to be a slow path operation here for
3713          * control message replies, so we're implicitly linearizing,
3714          * uncloning and dropping offloads from the skb by this.
3715          */
3716         ret = __bpf_try_make_writable(skb, skb->len);
3717         if (!ret) {
3718                 if (new_len > skb->len)
3719                         ret = bpf_skb_grow_rcsum(skb, new_len);
3720                 else if (new_len < skb->len)
3721                         ret = bpf_skb_trim_rcsum(skb, new_len);
3722                 if (!ret && skb_is_gso(skb))
3723                         skb_gso_reset(skb);
3724         }
3725         return ret;
3726 }
3727
3728 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3729            u64, flags)
3730 {
3731         int ret = __bpf_skb_change_tail(skb, new_len, flags);
3732
3733         bpf_compute_data_pointers(skb);
3734         return ret;
3735 }
3736
3737 static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3738         .func           = bpf_skb_change_tail,
3739         .gpl_only       = false,
3740         .ret_type       = RET_INTEGER,
3741         .arg1_type      = ARG_PTR_TO_CTX,
3742         .arg2_type      = ARG_ANYTHING,
3743         .arg3_type      = ARG_ANYTHING,
3744 };
3745
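/* Editor's note: an illustrative sketch (not part of filter.c) trimming a
 * packet with bpf_skb_change_tail(), e.g. for a header-only mirror; growing
 * instead would append zeroed bytes. The 64-byte cutoff is arbitrary and
 * can fail against __bpf_skb_min_len() above. Assumes <linux/bpf.h>,
 * <linux/pkt_cls.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int trim64(struct __sk_buff *skb)
{
        if (skb->len > 64 && bpf_skb_change_tail(skb, 64, 0))
                return TC_ACT_SHOT;
        return TC_ACT_OK;
}
#endif
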
3746 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3747            u64, flags)
3748 {
3749         int ret = __bpf_skb_change_tail(skb, new_len, flags);
3750
3751         bpf_compute_data_end_sk_skb(skb);
3752         return ret;
3753 }
3754
3755 static const struct bpf_func_proto sk_skb_change_tail_proto = {
3756         .func           = sk_skb_change_tail,
3757         .gpl_only       = false,
3758         .ret_type       = RET_INTEGER,
3759         .arg1_type      = ARG_PTR_TO_CTX,
3760         .arg2_type      = ARG_ANYTHING,
3761         .arg3_type      = ARG_ANYTHING,
3762 };
3763
3764 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3765                                         u64 flags)
3766 {
3767         u32 max_len = __bpf_skb_max_len(skb);
3768         u32 new_len = skb->len + head_room;
3769         int ret;
3770
3771         if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3772                      new_len < skb->len))
3773                 return -EINVAL;
3774
3775         ret = skb_cow(skb, head_room);
3776         if (likely(!ret)) {
3777                 /* The idea for this helper is that we currently only
3778                  * allow expanding the mac header. This means that
3779                  * skb->protocol, the network header, etc. stay as is.
3780                  * Compared to bpf_skb_change_tail(), we're more
3781                  * flexible due to not needing to linearize or
3782                  * reset GSO. The intention is for this helper to be
3783                  * used by an L3 skb that needs to push a mac header
3784                  * for redirection into an L2 device.
3785                  */
3786                 __skb_push(skb, head_room);
3787                 memset(skb->data, 0, head_room);
3788                 skb_reset_mac_header(skb);
3789         }
3790
3791         return ret;
3792 }
3793
3794 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3795            u64, flags)
3796 {
3797         int ret = __bpf_skb_change_head(skb, head_room, flags);
3798
3799         bpf_compute_data_pointers(skb);
3800         return ret;
3801 }
3802
3803 static const struct bpf_func_proto bpf_skb_change_head_proto = {
3804         .func           = bpf_skb_change_head,
3805         .gpl_only       = false,
3806         .ret_type       = RET_INTEGER,
3807         .arg1_type      = ARG_PTR_TO_CTX,
3808         .arg2_type      = ARG_ANYTHING,
3809         .arg3_type      = ARG_ANYTHING,
3810 };
3811
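/* Editor's note: an illustrative lwt_xmit sketch (not part of filter.c)
 * matching the intent described above: push mac header room onto an L3 skb
 * and redirect into an L2 device. Filling in the ethernet header is elided;
 * IFINDEX_L2_DEV is a made-up constant. Assumes <linux/bpf.h>,
 * <linux/if_ether.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

#define IFINDEX_L2_DEV 4        /* hypothetical target ifindex */

SEC("lwt_xmit")
int push_mac(struct __sk_buff *skb)
{
        if (bpf_skb_change_head(skb, ETH_HLEN, 0))
                return BPF_DROP;
        /* ... bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) ... */
        return bpf_redirect(IFINDEX_L2_DEV, 0);
}
#endif
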
3812 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3813            u64, flags)
3814 {
3815         int ret = __bpf_skb_change_head(skb, head_room, flags);
3816
3817         bpf_compute_data_end_sk_skb(skb);
3818         return ret;
3819 }
3820
3821 static const struct bpf_func_proto sk_skb_change_head_proto = {
3822         .func           = sk_skb_change_head,
3823         .gpl_only       = false,
3824         .ret_type       = RET_INTEGER,
3825         .arg1_type      = ARG_PTR_TO_CTX,
3826         .arg2_type      = ARG_ANYTHING,
3827         .arg3_type      = ARG_ANYTHING,
3828 };
3829 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3830 {
3831         return xdp_data_meta_unsupported(xdp) ? 0 :
3832                xdp->data - xdp->data_meta;
3833 }
3834
3835 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3836 {
3837         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3838         unsigned long metalen = xdp_get_metalen(xdp);
3839         void *data_start = xdp_frame_end + metalen;
3840         void *data = xdp->data + offset;
3841
3842         if (unlikely(data < data_start ||
3843                      data > xdp->data_end - ETH_HLEN))
3844                 return -EINVAL;
3845
3846         if (metalen)
3847                 memmove(xdp->data_meta + offset,
3848                         xdp->data_meta, metalen);
3849         xdp->data_meta += offset;
3850         xdp->data = data;
3851
3852         return 0;
3853 }
3854
3855 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3856         .func           = bpf_xdp_adjust_head,
3857         .gpl_only       = false,
3858         .ret_type       = RET_INTEGER,
3859         .arg1_type      = ARG_PTR_TO_CTX,
3860         .arg2_type      = ARG_ANYTHING,
3861 };
3862
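/* Editor's note: an illustrative XDP sketch (not part of filter.c) popping
 * a hypothetical 8-byte header from the front with bpf_xdp_adjust_head();
 * metadata, if any, is moved along per the memmove above. Packet pointers
 * must be re-derived from the ctx after the call. Assumes <linux/bpf.h>
 * and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int pop8(struct xdp_md *ctx)
{
        /* A positive offset moves xdp->data forward, shrinking the packet;
         * the helper rejects anything past data_end - ETH_HLEN.
         */
        if (bpf_xdp_adjust_head(ctx, 8))
                return XDP_ABORTED;
        return XDP_PASS;
}
#endif
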
3863 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3864 {
3865         void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
3866         void *data_end = xdp->data_end + offset;
3867
3868         /* Notice that xdp_data_hard_end has reserved some tailroom */
3869         if (unlikely(data_end > data_hard_end))
3870                 return -EINVAL;
3871
3872         /* ALL drivers MUST init xdp->frame_sz, chicken check below */
3873         if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
3874                 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
3875                 return -EINVAL;
3876         }
3877
3878         if (unlikely(data_end < xdp->data + ETH_HLEN))
3879                 return -EINVAL;
3880
3881         /* Clear memory area on grow, can contain uninit kernel memory */
3882         if (offset > 0)
3883                 memset(xdp->data_end, 0, offset);
3884
3885         xdp->data_end = data_end;
3886
3887         return 0;
3888 }
3889
3890 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3891         .func           = bpf_xdp_adjust_tail,
3892         .gpl_only       = false,
3893         .ret_type       = RET_INTEGER,
3894         .arg1_type      = ARG_PTR_TO_CTX,
3895         .arg2_type      = ARG_ANYTHING,
3896 };
3897
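/* Editor's note: an illustrative XDP sketch (not part of filter.c) capping
 * frames at 128 bytes with bpf_xdp_adjust_tail(); a positive offset would
 * instead grow into the (zero-filled) tailroom checked above. Assumes
 * <linux/bpf.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int cap128(struct xdp_md *ctx)
{
        int len = ctx->data_end - ctx->data;

        if (len > 128 && bpf_xdp_adjust_tail(ctx, 128 - len))
                return XDP_ABORTED;
        return XDP_PASS;
}
#endif
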
3898 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3899 {
3900         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3901         void *meta = xdp->data_meta + offset;
3902         unsigned long metalen = xdp->data - meta;
3903
3904         if (xdp_data_meta_unsupported(xdp))
3905                 return -ENOTSUPP;
3906         if (unlikely(meta < xdp_frame_end ||
3907                      meta > xdp->data))
3908                 return -EINVAL;
3909         if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3910                      (metalen > 32)))
3911                 return -EACCES;
3912
3913         xdp->data_meta = meta;
3914
3915         return 0;
3916 }
3917
3918 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3919         .func           = bpf_xdp_adjust_meta,
3920         .gpl_only       = false,
3921         .ret_type       = RET_INTEGER,
3922         .arg1_type      = ARG_PTR_TO_CTX,
3923         .arg2_type      = ARG_ANYTHING,
3924 };
3925
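/* Editor's note: an illustrative XDP sketch (not part of filter.c) stashing
 * one u32 of metadata in front of the payload; per the checks above, the
 * metadata area must stay u32-aligned and at most 32 bytes, and not every
 * driver supports it. A tc program can later read it via the skb metadata.
 * Assumes <linux/bpf.h> and <bpf/bpf_helpers.h>; the value is arbitrary.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int set_meta(struct xdp_md *ctx)
{
        __u32 *meta;

        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                return XDP_PASS;        /* e.g. driver lacks meta support */
        meta = (void *)(long)ctx->data_meta;
        if ((void *)(meta + 1) > (void *)(long)ctx->data)
                return XDP_PASS;
        *meta = 0xabcd;
        return XDP_PASS;
}
#endif
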
3926 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3927                             struct bpf_map *map, struct xdp_buff *xdp)
3928 {
3929         switch (map->map_type) {
3930         case BPF_MAP_TYPE_DEVMAP:
3931         case BPF_MAP_TYPE_DEVMAP_HASH:
3932                 return dev_map_enqueue(fwd, xdp, dev_rx);
3933         case BPF_MAP_TYPE_CPUMAP:
3934                 return cpu_map_enqueue(fwd, xdp, dev_rx);
3935         case BPF_MAP_TYPE_XSKMAP:
3936                 return __xsk_map_redirect(fwd, xdp);
3937         default:
3938                 return -EBADRQC;
3939         }
3940         return 0;
3941 }
3942
3943 void xdp_do_flush(void)
3944 {
3945         __dev_flush();
3946         __cpu_map_flush();
3947         __xsk_map_flush();
3948 }
3949 EXPORT_SYMBOL_GPL(xdp_do_flush);
3950
3951 static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3952 {
3953         switch (map->map_type) {
3954         case BPF_MAP_TYPE_DEVMAP:
3955                 return __dev_map_lookup_elem(map, index);
3956         case BPF_MAP_TYPE_DEVMAP_HASH:
3957                 return __dev_map_hash_lookup_elem(map, index);
3958         case BPF_MAP_TYPE_CPUMAP:
3959                 return __cpu_map_lookup_elem(map, index);
3960         case BPF_MAP_TYPE_XSKMAP:
3961                 return __xsk_map_lookup_elem(map, index);
3962         default:
3963                 return NULL;
3964         }
3965 }
3966
3967 void bpf_clear_redirect_map(struct bpf_map *map)
3968 {
3969         struct bpf_redirect_info *ri;
3970         int cpu;
3971
3972         for_each_possible_cpu(cpu) {
3973                 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3974                 /* Avoid polluting a remote cacheline with writes if
3975                  * not needed. Once we pass this test, we need the
3976                  * cmpxchg() to make sure the value hasn't been changed
3977                  * in the meantime by a remote CPU.
3978                  */
3979                 if (unlikely(READ_ONCE(ri->map) == map))
3980                         cmpxchg(&ri->map, map, NULL);
3981         }
3982 }
3983
3984 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3985                     struct bpf_prog *xdp_prog)
3986 {
3987         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3988         struct bpf_map *map = READ_ONCE(ri->map);
3989         u32 index = ri->tgt_index;
3990         void *fwd = ri->tgt_value;
3991         int err;
3992
3993         ri->tgt_index = 0;
3994         ri->tgt_value = NULL;
3995         WRITE_ONCE(ri->map, NULL);
3996
3997         if (unlikely(!map)) {
3998                 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3999                 if (unlikely(!fwd)) {
4000                         err = -EINVAL;
4001                         goto err;
4002                 }
4003
4004                 err = dev_xdp_enqueue(fwd, xdp, dev);
4005         } else {
4006                 err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
4007         }
4008
4009         if (unlikely(err))
4010                 goto err;
4011
4012         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
4013         return 0;
4014 err:
4015         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
4016         return err;
4017 }
4018 EXPORT_SYMBOL_GPL(xdp_do_redirect);
4019
4020 static int xdp_do_generic_redirect_map(struct net_device *dev,
4021                                        struct sk_buff *skb,
4022                                        struct xdp_buff *xdp,
4023                                        struct bpf_prog *xdp_prog,
4024                                        struct bpf_map *map)
4025 {
4026         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4027         u32 index = ri->tgt_index;
4028         void *fwd = ri->tgt_value;
4029         int err = 0;
4030
4031         ri->tgt_index = 0;
4032         ri->tgt_value = NULL;
4033         WRITE_ONCE(ri->map, NULL);
4034
4035         if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
4036             map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
4037                 struct bpf_dtab_netdev *dst = fwd;
4038
4039                 err = dev_map_generic_redirect(dst, skb, xdp_prog);
4040                 if (unlikely(err))
4041                         goto err;
4042         } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
4043                 struct xdp_sock *xs = fwd;
4044
4045                 err = xsk_generic_rcv(xs, xdp);
4046                 if (err)
4047                         goto err;
4048                 consume_skb(skb);
4049         } else {
4050                 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
4051                 err = -EBADRQC;
4052                 goto err;
4053         }
4054
4055         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
4056         return 0;
4057 err:
4058         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
4059         return err;
4060 }
4061
4062 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
4063                             struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
4064 {
4065         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4066         struct bpf_map *map = READ_ONCE(ri->map);
4067         u32 index = ri->tgt_index;
4068         struct net_device *fwd;
4069         int err = 0;
4070
4071         if (map)
4072                 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
4073                                                    map);
4074         ri->tgt_index = 0;
4075         fwd = dev_get_by_index_rcu(dev_net(dev), index);
4076         if (unlikely(!fwd)) {
4077                 err = -EINVAL;
4078                 goto err;
4079         }
4080
4081         err = xdp_ok_fwd_dev(fwd, skb->len);
4082         if (unlikely(err))
4083                 goto err;
4084
4085         skb->dev = fwd;
4086         _trace_xdp_redirect(dev, xdp_prog, index);
4087         generic_xdp_tx(skb, xdp_prog);
4088         return 0;
4089 err:
4090         _trace_xdp_redirect_err(dev, xdp_prog, index, err);
4091         return err;
4092 }
4093
4094 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
4095 {
4096         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4097
4098         if (unlikely(flags))
4099                 return XDP_ABORTED;
4100
4101         ri->flags = flags;
4102         ri->tgt_index = ifindex;
4103         ri->tgt_value = NULL;
4104         WRITE_ONCE(ri->map, NULL);
4105
4106         return XDP_REDIRECT;
4107 }
4108
4109 static const struct bpf_func_proto bpf_xdp_redirect_proto = {
4110         .func           = bpf_xdp_redirect,
4111         .gpl_only       = false,
4112         .ret_type       = RET_INTEGER,
4113         .arg1_type      = ARG_ANYTHING,
4114         .arg2_type      = ARG_ANYTHING,
4115 };
4116
4117 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
4118            u64, flags)
4119 {
4120         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
4121
4122         /* Lower bits of the flags are used as return code on lookup failure */
4123         if (unlikely(flags > XDP_TX))
4124                 return XDP_ABORTED;
4125
4126         ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
4127         if (unlikely(!ri->tgt_value)) {
4128                 /* If the lookup fails we want to clear out the state in the
4129                  * redirect_info struct completely, so that if an eBPF program
4130                  * performs multiple lookups, the last one always takes
4131                  * precedence.
4132                  */
4133                 WRITE_ONCE(ri->map, NULL);
4134                 return flags;
4135         }
4136
4137         ri->flags = flags;
4138         ri->tgt_index = ifindex;
4139         WRITE_ONCE(ri->map, map);
4140
4141         return XDP_REDIRECT;
4142 }
4143
4144 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
4145         .func           = bpf_xdp_redirect_map,
4146         .gpl_only       = false,
4147         .ret_type       = RET_INTEGER,
4148         .arg1_type      = ARG_CONST_MAP_PTR,
4149         .arg2_type      = ARG_ANYTHING,
4150         .arg3_type      = ARG_ANYTHING,
4151 };
4152
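/* Editor's note: an illustrative XDP sketch (not part of filter.c) pairing
 * bpf_redirect_map() with a devmap; hairpinning on the ingress ifindex is
 * just for demonstration. As noted above, the lower bits of the flags
 * (XDP_PASS here) become the return code when the lookup fails. Assumes a
 * BTF-style map definition with a recent libbpf, <linux/bpf.h> and
 * <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int redir(struct xdp_md *ctx)
{
        return bpf_redirect_map(&tx_ports, ctx->ingress_ifindex, XDP_PASS);
}
#endif
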
4153 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
4154                                   unsigned long off, unsigned long len)
4155 {
4156         void *ptr = skb_header_pointer(skb, off, len, dst_buff);
4157
4158         if (unlikely(!ptr))
4159                 return len;
4160         if (ptr != dst_buff)
4161                 memcpy(dst_buff, ptr, len);
4162
4163         return 0;
4164 }
4165
4166 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
4167            u64, flags, void *, meta, u64, meta_size)
4168 {
4169         u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4170
4171         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4172                 return -EINVAL;
4173         if (unlikely(!skb || skb_size > skb->len))
4174                 return -EFAULT;
4175
4176         return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
4177                                 bpf_skb_copy);
4178 }
4179
4180 static const struct bpf_func_proto bpf_skb_event_output_proto = {
4181         .func           = bpf_skb_event_output,
4182         .gpl_only       = true,
4183         .ret_type       = RET_INTEGER,
4184         .arg1_type      = ARG_PTR_TO_CTX,
4185         .arg2_type      = ARG_CONST_MAP_PTR,
4186         .arg3_type      = ARG_ANYTHING,
4187         .arg4_type      = ARG_PTR_TO_MEM,
4188         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4189 };
4190
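/* Editor's note: an illustrative tc-BPF sketch (not part of filter.c)
 * sampling packets through the helper above: the upper 32 bits of the flags
 * carry how much payload to append after the meta struct, and must not
 * exceed skb->len per the -EFAULT check. Map and struct names are made up;
 * assumes <linux/bpf.h>, <linux/pkt_cls.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("tc")
int sample(struct __sk_buff *skb)
{
        struct { __u32 len; } meta = { .len = skb->len };
        __u64 n = skb->len < 64 ? skb->len : 64;

        bpf_perf_event_output(skb, &events, BPF_F_CURRENT_CPU | (n << 32),
                              &meta, sizeof(meta));
        return TC_ACT_OK;
}
#endif
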
4191 BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
4192
4193 const struct bpf_func_proto bpf_skb_output_proto = {
4194         .func           = bpf_skb_event_output,
4195         .gpl_only       = true,
4196         .ret_type       = RET_INTEGER,
4197         .arg1_type      = ARG_PTR_TO_BTF_ID,
4198         .arg1_btf_id    = &bpf_skb_output_btf_ids[0],
4199         .arg2_type      = ARG_CONST_MAP_PTR,
4200         .arg3_type      = ARG_ANYTHING,
4201         .arg4_type      = ARG_PTR_TO_MEM,
4202         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4203 };
4204
4205 static unsigned short bpf_tunnel_key_af(u64 flags)
4206 {
4207         return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
4208 }
4209
4210 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
4211            u32, size, u64, flags)
4212 {
4213         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
4214         u8 compat[sizeof(struct bpf_tunnel_key)];
4215         void *to_orig = to;
4216         int err;
4217
4218         if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
4219                 err = -EINVAL;
4220                 goto err_clear;
4221         }
4222         if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
4223                 err = -EPROTO;
4224                 goto err_clear;
4225         }
4226         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
4227                 err = -EINVAL;
4228                 switch (size) {
4229                 case offsetof(struct bpf_tunnel_key, tunnel_label):
4230                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4231                         goto set_compat;
4232                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4233                         /* Fixup deprecated structure layouts here, so we have
4234                          * a common path later on.
4235                          */
4236                         if (ip_tunnel_info_af(info) != AF_INET)
4237                                 goto err_clear;
4238 set_compat:
4239                         to = (struct bpf_tunnel_key *)compat;
4240                         break;
4241                 default:
4242                         goto err_clear;
4243                 }
4244         }
4245
4246         to->tunnel_id = be64_to_cpu(info->key.tun_id);
4247         to->tunnel_tos = info->key.tos;
4248         to->tunnel_ttl = info->key.ttl;
4249         to->tunnel_ext = 0;
4250
4251         if (flags & BPF_F_TUNINFO_IPV6) {
4252                 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
4253                        sizeof(to->remote_ipv6));
4254                 to->tunnel_label = be32_to_cpu(info->key.label);
4255         } else {
4256                 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
4257                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4258                 to->tunnel_label = 0;
4259         }
4260
4261         if (unlikely(size != sizeof(struct bpf_tunnel_key)))
4262                 memcpy(to_orig, to, size);
4263
4264         return 0;
4265 err_clear:
4266         memset(to_orig, 0, size);
4267         return err;
4268 }
4269
4270 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
4271         .func           = bpf_skb_get_tunnel_key,
4272         .gpl_only       = false,
4273         .ret_type       = RET_INTEGER,
4274         .arg1_type      = ARG_PTR_TO_CTX,
4275         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
4276         .arg3_type      = ARG_CONST_SIZE,
4277         .arg4_type      = ARG_ANYTHING,
4278 };
4279
4280 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
4281 {
4282         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
4283         int err;
4284
4285         if (unlikely(!info ||
4286                      !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
4287                 err = -ENOENT;
4288                 goto err_clear;
4289         }
4290         if (unlikely(size < info->options_len)) {
4291                 err = -ENOMEM;
4292                 goto err_clear;
4293         }
4294
4295         ip_tunnel_info_opts_get(to, info);
4296         if (size > info->options_len)
4297                 memset(to + info->options_len, 0, size - info->options_len);
4298
4299         return info->options_len;
4300 err_clear:
4301         memset(to, 0, size);
4302         return err;
4303 }
4304
4305 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
4306         .func           = bpf_skb_get_tunnel_opt,
4307         .gpl_only       = false,
4308         .ret_type       = RET_INTEGER,
4309         .arg1_type      = ARG_PTR_TO_CTX,
4310         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
4311         .arg3_type      = ARG_CONST_SIZE,
4312 };
4313
4314 static struct metadata_dst __percpu *md_dst;
4315
4316 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
4317            const struct bpf_tunnel_key *, from, u32, size, u64, flags)
4318 {
4319         struct metadata_dst *md = this_cpu_ptr(md_dst);
4320         u8 compat[sizeof(struct bpf_tunnel_key)];
4321         struct ip_tunnel_info *info;
4322
4323         if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
4324                                BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
4325                 return -EINVAL;
4326         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
4327                 switch (size) {
4328                 case offsetof(struct bpf_tunnel_key, tunnel_label):
4329                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4330                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4331                         /* Fixup deprecated structure layouts here, so we have
4332                          * a common path later on.
4333                          */
4334                         memcpy(compat, from, size);
4335                         memset(compat + size, 0, sizeof(compat) - size);
4336                         from = (const struct bpf_tunnel_key *) compat;
4337                         break;
4338                 default:
4339                         return -EINVAL;
4340                 }
4341         }
4342         if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
4343                      from->tunnel_ext))
4344                 return -EINVAL;
4345
4346         skb_dst_drop(skb);
4347         dst_hold((struct dst_entry *) md);
4348         skb_dst_set(skb, (struct dst_entry *) md);
4349
4350         info = &md->u.tun_info;
4351         memset(info, 0, sizeof(*info));
4352         info->mode = IP_TUNNEL_INFO_TX;
4353
4354         info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
4355         if (flags & BPF_F_DONT_FRAGMENT)
4356                 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
4357         if (flags & BPF_F_ZERO_CSUM_TX)
4358                 info->key.tun_flags &= ~TUNNEL_CSUM;
4359         if (flags & BPF_F_SEQ_NUMBER)
4360                 info->key.tun_flags |= TUNNEL_SEQ;
4361
4362         info->key.tun_id = cpu_to_be64(from->tunnel_id);
4363         info->key.tos = from->tunnel_tos;
4364         info->key.ttl = from->tunnel_ttl;
4365
4366         if (flags & BPF_F_TUNINFO_IPV6) {
4367                 info->mode |= IP_TUNNEL_INFO_IPV6;
4368                 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
4369                        sizeof(from->remote_ipv6));
4370                 info->key.label = cpu_to_be32(from->tunnel_label) &
4371                                   IPV6_FLOWLABEL_MASK;
4372         } else {
4373                 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
4374         }
4375
4376         return 0;
4377 }
4378
4379 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
4380         .func           = bpf_skb_set_tunnel_key,
4381         .gpl_only       = false,
4382         .ret_type       = RET_INTEGER,
4383         .arg1_type      = ARG_PTR_TO_CTX,
4384         .arg2_type      = ARG_PTR_TO_MEM,
4385         .arg3_type      = ARG_CONST_SIZE,
4386         .arg4_type      = ARG_ANYTHING,
4387 };
4388
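/* Editor's note: an illustrative collect_md egress sketch (not part of
 * filter.c) for bpf_skb_set_tunnel_key(): attach TX metadata so the tunnel
 * driver encapsulates toward a made-up peer. Note remote_ipv4 is in host
 * byte order, matching the cpu_to_be32() conversion above. Assumes
 * <linux/bpf.h>, <linux/pkt_cls.h> and <bpf/bpf_helpers.h>.
 */
#if 0
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int tun_out(struct __sk_buff *skb)
{
        struct bpf_tunnel_key key = {
                .tunnel_id      = 42,           /* VNI/key, example value */
                .remote_ipv4    = 0xc0000201,   /* 192.0.2.1 */
                .tunnel_ttl     = 64,
        };

        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX))
                return TC_ACT_SHOT;
        return TC_ACT_OK;
}
#endif
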
4389 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4390            const u8 *, from, u32, size)
4391 {
4392         struct ip_tunnel_info *info = skb_tunnel_info(skb);
4393         const struct metadata_dst *md = this_cpu_ptr(md_dst);
4394
4395         if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4396                 return -EINVAL;
4397         if (unlikely(size > IP_TUNNEL_OPTS_MAX))
4398                 return -ENOMEM;
4399
4400         ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
4401
4402         return 0;
4403 }
4404
4405 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4406         .func           = bpf_skb_set_tunnel_opt,
4407         .gpl_only       = false,
4408         .ret_type       = RET_INTEGER,
4409         .arg1_type      = ARG_PTR_TO_CTX,
4410         .arg2_type      = ARG_PTR_TO_MEM,
4411         .arg3_type      = ARG_CONST_SIZE,
4412 };
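/* Usage sketch (not part of this file): a tc/BPF program on the egress
 * side of a collect-metadata tunnel device could program the encap via
 * the two helpers wrapped above. The VNI and address are hypothetical.
 *
 *	SEC("tc")
 *	int set_tunnel(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {};
 *
 *		key.tunnel_id	= 42;		// hypothetical VNI
 *		key.remote_ipv4	= 0xac100164;	// 172.16.1.100, host order
 *		key.tunnel_ttl	= 64;
 *		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *					   BPF_F_ZERO_CSUM_TX) < 0)
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 *
 * bpf_skb_set_tunnel_opt() can then attach tunnel metadata (e.g. a
 * Geneve option blob) to the same per-CPU md_dst; per the checks below,
 * size must be a multiple of 4 and at most IP_TUNNEL_OPTS_MAX.
 */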
4413
4414 static const struct bpf_func_proto *
4415 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
4416 {
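	/* Lazily allocate the shared per-CPU metadata dst on first use.
	 * cmpxchg() ensures a single winner if several program loads race
	 * here; the losers free their allocation and reuse the winner's.
	 */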
4417         if (!md_dst) {
4418                 struct metadata_dst __percpu *tmp;
4419
4420                 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4421                                                 METADATA_IP_TUNNEL,
4422                                                 GFP_KERNEL);
4423                 if (!tmp)
4424                         return NULL;
4425                 if (cmpxchg(&md_dst, NULL, tmp))
4426                         metadata_dst_free_percpu(tmp);
4427         }
4428
4429         switch (which) {
4430         case BPF_FUNC_skb_set_tunnel_key:
4431                 return &bpf_skb_set_tunnel_key_proto;
4432         case BPF_FUNC_skb_set_tunnel_opt:
4433                 return &bpf_skb_set_tunnel_opt_proto;
4434         default:
4435                 return NULL;
4436         }
4437 }
4438
4439 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4440            u32, idx)
4441 {
4442         struct bpf_array *array = container_of(map, struct bpf_array, map);
4443         struct cgroup *cgrp;
4444         struct sock *sk;
4445
4446         sk = skb_to_full_sk(skb);
4447         if (!sk || !sk_fullsock(sk))
4448                 return -ENOENT;
4449         if (unlikely(idx >= array->map.max_entries))
4450                 return -E2BIG;
4451
4452         cgrp = READ_ONCE(array->ptrs[idx]);
4453         if (unlikely(!cgrp))
4454                 return -EAGAIN;
4455
4456         return sk_under_cgroup_hierarchy(sk, cgrp);
4457 }
4458
4459 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4460         .func           = bpf_skb_under_cgroup,
4461         .gpl_only       = false,
4462         .ret_type       = RET_INTEGER,
4463         .arg1_type      = ARG_PTR_TO_CTX,
4464         .arg2_type      = ARG_CONST_MAP_PTR,
4465         .arg3_type      = ARG_ANYTHING,
4466 };
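/* Usage sketch (not part of this file): bpf_skb_under_cgroup() tests the
 * skb's socket against one slot of a BPF_MAP_TYPE_CGROUP_ARRAY that user
 * space populated with cgroup fds. Map name and index are made up.
 *
 *	if (bpf_skb_under_cgroup(skb, &cgrp_array, 0) == 1)
 *		return TC_ACT_OK;	// traffic from the watched cgroup
 *
 * Return value: 1 if the socket is in the hierarchy, 0 if not, or a
 * negative error (-ENOENT, -E2BIG, -EAGAIN) as mapped above.
 */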
4467
4468 #ifdef CONFIG_SOCK_CGROUP_DATA
4469 static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
4470 {
4471         struct cgroup *cgrp;
4472
4473         sk = sk_to_full_sk(sk);
4474         if (!sk || !sk_fullsock(sk))
4475                 return 0;
4476
4477         cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4478         return cgroup_id(cgrp);
4479 }
4480
4481 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4482 {
4483         return __bpf_sk_cgroup_id(skb->sk);
4484 }
4485
4486 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4487         .func           = bpf_skb_cgroup_id,
4488         .gpl_only       = false,
4489         .ret_type       = RET_INTEGER,
4490         .arg1_type      = ARG_PTR_TO_CTX,
4491 };
4492
4493 static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
4494                                               int ancestor_level)
4495 {
4496         struct cgroup *ancestor;
4497         struct cgroup *cgrp;
4498
4499         sk = sk_to_full_sk(sk);
4500         if (!sk || !sk_fullsock(sk))
4501                 return 0;
4502
4503         cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4504         ancestor = cgroup_ancestor(cgrp, ancestor_level);
4505         if (!ancestor)
4506                 return 0;
4507
4508         return cgroup_id(ancestor);
4509 }
4510
4511 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4512            ancestor_level)
4513 {
4514         return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
4515 }
4516
4517 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4518         .func           = bpf_skb_ancestor_cgroup_id,
4519         .gpl_only       = false,
4520         .ret_type       = RET_INTEGER,
4521         .arg1_type      = ARG_PTR_TO_CTX,
4522         .arg2_type      = ARG_ANYTHING,
4523 };
4524
4525 BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
4526 {
4527         return __bpf_sk_cgroup_id(sk);
4528 }
4529
4530 static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
4531         .func           = bpf_sk_cgroup_id,
4532         .gpl_only       = false,
4533         .ret_type       = RET_INTEGER,
4534         .arg1_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
4535 };
4536
4537 BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
4538 {
4539         return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
4540 }
4541
4542 static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
4543         .func           = bpf_sk_ancestor_cgroup_id,
4544         .gpl_only       = false,
4545         .ret_type       = RET_INTEGER,
4546         .arg1_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
4547         .arg2_type      = ARG_ANYTHING,
4548 };
4549 #endif
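/* Usage sketch (not part of this file): the cgroup-id helpers above let
 * a program key per-cgroup state. With cgroup v2 mounted at
 * /sys/fs/cgroup and a socket in .../a/b/c, ancestor_level 0 is the
 * root and level 2 would be "b" in this hypothetical layout.
 *
 *	__u64 id     = bpf_skb_cgroup_id(skb);
 *	__u64 parent = bpf_skb_ancestor_cgroup_id(skb, 2);
 *
 * Both return 0 when no full socket is attached, so 0 should be treated
 * as "unknown" rather than as a valid cgroup id.
 */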
4550
4551 static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4552                                   unsigned long off, unsigned long len)
4553 {
4554         memcpy(dst_buff, src_buff + off, len);
4555         return 0;
4556 }
4557
4558 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4559            u64, flags, void *, meta, u64, meta_size)
4560 {
4561         u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4562
4563         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4564                 return -EINVAL;
4565         if (unlikely(!xdp ||
4566                      xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4567                 return -EFAULT;
4568
4569         return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4570                                 xdp_size, bpf_xdp_copy);
4571 }
4572
4573 static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4574         .func           = bpf_xdp_event_output,
4575         .gpl_only       = true,
4576         .ret_type       = RET_INTEGER,
4577         .arg1_type      = ARG_PTR_TO_CTX,
4578         .arg2_type      = ARG_CONST_MAP_PTR,
4579         .arg3_type      = ARG_ANYTHING,
4580         .arg4_type      = ARG_PTR_TO_MEM,
4581         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4582 };
4583
4584 BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
4585
4586 const struct bpf_func_proto bpf_xdp_output_proto = {
4587         .func           = bpf_xdp_event_output,
4588         .gpl_only       = true,
4589         .ret_type       = RET_INTEGER,
4590         .arg1_type      = ARG_PTR_TO_BTF_ID,
4591         .arg1_btf_id    = &bpf_xdp_output_btf_ids[0],
4592         .arg2_type      = ARG_CONST_MAP_PTR,
4593         .arg3_type      = ARG_ANYTHING,
4594         .arg4_type      = ARG_PTR_TO_MEM,
4595         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4596 };
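/* Usage sketch (not part of this file): an XDP program can stream packet
 * samples to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY. The
 * number of packet bytes to append is packed into the upper 32 bits of
 * the flags argument (BPF_F_CTXLEN_MASK), as unpacked above. The map
 * name "events" and the 64-byte sample length are hypothetical.
 *
 *	struct meta { __u32 ifindex; } m = { ctx->ingress_ifindex };
 *	__u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *	bpf_perf_event_output(ctx, &events, flags, &m, sizeof(m));
 */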
4597
4598 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4599 {
4600         return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
4601 }
4602
4603 static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4604         .func           = bpf_get_socket_cookie,
4605         .gpl_only       = false,
4606         .ret_type       = RET_INTEGER,
4607         .arg1_type      = ARG_PTR_TO_CTX,
4608 };
4609
4610 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4611 {
4612         return __sock_gen_cookie(ctx->sk);
4613 }
4614
4615 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4616         .func           = bpf_get_socket_cookie_sock_addr,
4617         .gpl_only       = false,
4618         .ret_type       = RET_INTEGER,
4619         .arg1_type      = ARG_PTR_TO_CTX,
4620 };
4621
4622 BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
4623 {
4624         return __sock_gen_cookie(ctx);
4625 }
4626
4627 static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
4628         .func           = bpf_get_socket_cookie_sock,
4629         .gpl_only       = false,
4630         .ret_type       = RET_INTEGER,
4631         .arg1_type      = ARG_PTR_TO_CTX,
4632 };
4633
4634 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4635 {
4636         return __sock_gen_cookie(ctx->sk);
4637 }
4638
4639 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4640         .func           = bpf_get_socket_cookie_sock_ops,
4641         .gpl_only       = false,
4642         .ret_type       = RET_INTEGER,
4643         .arg1_type      = ARG_PTR_TO_CTX,
4644 };
4645
4646 static u64 __bpf_get_netns_cookie(struct sock *sk)
4647 {
4648 #ifdef CONFIG_NET_NS
4649         return __net_gen_cookie(sk ? sk->sk_net.net : &init_net);
4650 #else
4651         return 0;
4652 #endif
4653 }
4654
4655 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
4656 {
4657         return __bpf_get_netns_cookie(ctx);
4658 }
4659
4660 static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
4661         .func           = bpf_get_netns_cookie_sock,
4662         .gpl_only       = false,
4663         .ret_type       = RET_INTEGER,
4664         .arg1_type      = ARG_PTR_TO_CTX_OR_NULL,
4665 };
4666
4667 BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4668 {
4669         return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
4670 }
4671
4672 static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
4673         .func           = bpf_get_netns_cookie_sock_addr,
4674         .gpl_only       = false,
4675         .ret_type       = RET_INTEGER,
4676         .arg1_type      = ARG_PTR_TO_CTX_OR_NULL,
4677 };
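/* Usage sketch (not part of this file): both cookies are stable 64-bit
 * identifiers and make good map keys. A cgroup/connect4 program sees
 * both helpers via the sock_addr protos above; "flows" is a
 * hypothetical hash map.
 *
 *	__u64 sk_cookie = bpf_get_socket_cookie(ctx);
 *	__u64 ns_cookie = bpf_get_netns_cookie(ctx);
 *
 *	bpf_map_update_elem(&flows, &sk_cookie, &ns_cookie, BPF_ANY);
 *
 * The netns cookie distinguishes identical 5-tuples seen in different
 * network namespaces (e.g. containers).
 */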
4678
4679 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4680 {
4681         struct sock *sk = sk_to_full_sk(skb->sk);
4682         kuid_t kuid;
4683
4684         if (!sk || !sk_fullsock(sk))
4685                 return overflowuid;
4686         kuid = sock_net_uid(sock_net(sk), sk);
4687         return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4688 }
4689
4690 static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4691         .func           = bpf_get_socket_uid,
4692         .gpl_only       = false,
4693         .ret_type       = RET_INTEGER,
4694         .arg1_type      = ARG_PTR_TO_CTX,
4695 };
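/* Usage sketch (not part of this file):
 *
 *	__u32 uid = bpf_get_socket_uid(skb);
 *	// overflowuid (65534 by default) means "no full socket"
 */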
4696
4697 static int _bpf_setsockopt(struct sock *sk, int level, int optname,
4698                            char *optval, int optlen)
4699 {
4700         char devname[IFNAMSIZ];
4701         int val, valbool;
4702         struct net *net;
4703         int ifindex;
4704         int ret = 0;
4705
4706         if (!sk_fullsock(sk))
4707                 return -EINVAL;
4708
4709         sock_owned_by_me(sk);
4710
4711         if (level == SOL_SOCKET) {
4712                 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
4713                         return -EINVAL;
4714                 val = (optlen == sizeof(int)) ? *((int *)optval) : 0;
4715                 valbool = val ? 1 : 0;
4716
4717                 /* Only some socket options are supported */
4718                 switch (optname) {
4719                 case SO_RCVBUF:
4720                         val = min_t(u32, val, sysctl_rmem_max);
4721                         sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4722                         WRITE_ONCE(sk->sk_rcvbuf,
4723                                    max_t(int, val * 2, SOCK_MIN_RCVBUF));
4724                         break;
4725                 case SO_SNDBUF:
4726                         val = min_t(u32, val, sysctl_wmem_max);
4727                         sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4728                         WRITE_ONCE(sk->sk_sndbuf,
4729                                    max_t(int, val * 2, SOCK_MIN_SNDBUF));
4730                         break;
4731                 case SO_MAX_PACING_RATE: /* 32bit version */
4732                         if (val != ~0U)
4733                                 cmpxchg(&sk->sk_pacing_status,
4734                                         SK_PACING_NONE,
4735                                         SK_PACING_NEEDED);
4736                         sk->sk_max_pacing_rate = (val == ~0U) ?
4737                                                  ~0UL : (unsigned int)val;
4738                         sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4739                                                  sk->sk_max_pacing_rate);
4740                         break;
4741                 case SO_PRIORITY:
4742                         sk->sk_priority = val;
4743                         break;
4744                 case SO_RCVLOWAT:
4745                         if (val < 0)
4746                                 val = INT_MAX;
4747                         WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
4748                         break;
4749                 case SO_MARK:
4750                         if (sk->sk_mark != val) {
4751                                 sk->sk_mark = val;
4752                                 sk_dst_reset(sk);
4753                         }
4754                         break;
4755                 case SO_BINDTODEVICE:
4756                         optlen = min_t(long, optlen, IFNAMSIZ - 1);
4757                         strncpy(devname, optval, optlen);
4758                         devname[optlen] = 0;
4759
4760                         ifindex = 0;
4761                         if (devname[0] != '\0') {
4762                                 struct net_device *dev;
4763
4764                                 ret = -ENODEV;
4765
4766                                 net = sock_net(sk);
4767                                 dev = dev_get_by_name(net, devname);
4768                                 if (!dev)
4769                                         break;
4770                                 ifindex = dev->ifindex;
4771                                 dev_put(dev);
4772                         }
4773                         fallthrough;
4774                 case SO_BINDTOIFINDEX:
4775                         if (optname == SO_BINDTOIFINDEX)
4776                                 ifindex = val;
4777                         ret = sock_bindtoindex(sk, ifindex, false);
4778                         break;
4779                 case SO_KEEPALIVE:
4780                         if (sk->sk_prot->keepalive)
4781                                 sk->sk_prot->keepalive(sk, valbool);
4782                         sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
4783                         break;
4784                 default:
4785                         ret = -EINVAL;
4786                 }
4787 #ifdef CONFIG_INET
4788         } else if (level == SOL_IP) {
4789                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4790                         return -EINVAL;
4791
4792                 val = *((int *)optval);
4793                 /* Only some options are supported */
4794                 switch (optname) {
4795                 case IP_TOS:
4796                         if (val < -1 || val > 0xff) {
4797                                 ret = -EINVAL;
4798                         } else {
4799                                 struct inet_sock *inet = inet_sk(sk);
4800
4801                                 if (val == -1)
4802                                         val = 0;
4803                                 inet->tos = val;
4804                         }
4805                         break;
4806                 default:
4807                         ret = -EINVAL;
4808                 }
4809 #if IS_ENABLED(CONFIG_IPV6)
4810         } else if (level == SOL_IPV6) {
4811                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4812                         return -EINVAL;
4813
4814                 val = *((int *)optval);
4815                 /* Only some options are supported */
4816                 switch (optname) {
4817                 case IPV6_TCLASS:
4818                         if (val < -1 || val > 0xff) {
4819                                 ret = -EINVAL;
4820                         } else {
4821                                 struct ipv6_pinfo *np = inet6_sk(sk);
4822
4823                                 if (val == -1)
4824                                         val = 0;
4825                                 np->tclass = val;
4826                         }
4827                         break;
4828                 default:
4829                         ret = -EINVAL;
4830                 }
4831 #endif
4832         } else if (level == SOL_TCP &&
4833                    sk->sk_prot->setsockopt == tcp_setsockopt) {
4834                 if (optname == TCP_CONGESTION) {
4835                         char name[TCP_CA_NAME_MAX];
4836
4837                         strncpy(name, optval, min_t(long, optlen,
4838                                                     TCP_CA_NAME_MAX-1));
4839                         name[TCP_CA_NAME_MAX-1] = 0;
4840                         ret = tcp_set_congestion_control(sk, name, false, true);
4841                 } else {
4842                         struct inet_connection_sock *icsk = inet_csk(sk);
4843                         struct tcp_sock *tp = tcp_sk(sk);
4844                         unsigned long timeout;
4845
4846                         if (optlen != sizeof(int))
4847                                 return -EINVAL;
4848
4849                         val = *((int *)optval);
4850                         /* Only some options are supported */
4851                         switch (optname) {
4852                         case TCP_BPF_IW:
4853                                 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4854                                         ret = -EINVAL;
4855                                 else
4856                                         tp->snd_cwnd = val;
4857                                 break;
4858                         case TCP_BPF_SNDCWND_CLAMP:
4859                                 if (val <= 0) {
4860                                         ret = -EINVAL;
4861                                 } else {
4862                                         tp->snd_cwnd_clamp = val;
4863                                         tp->snd_ssthresh = val;
4864                                 }
4865                                 break;
4866                         case TCP_BPF_DELACK_MAX:
4867                                 timeout = usecs_to_jiffies(val);
4868                                 if (timeout > TCP_DELACK_MAX ||
4869                                     timeout < TCP_TIMEOUT_MIN)
4870                                         return -EINVAL;
4871                                 inet_csk(sk)->icsk_delack_max = timeout;
4872                                 break;
4873                         case TCP_BPF_RTO_MIN:
4874                                 timeout = usecs_to_jiffies(val);
4875                                 if (timeout > TCP_RTO_MIN ||
4876                                     timeout < TCP_TIMEOUT_MIN)
4877                                         return -EINVAL;
4878                                 inet_csk(sk)->icsk_rto_min = timeout;
4879                                 break;
4880                         case TCP_SAVE_SYN:
4881                                 if (val < 0 || val > 1)
4882                                         ret = -EINVAL;
4883                                 else
4884                                         tp->save_syn = val;
4885                                 break;
4886                         case TCP_KEEPIDLE:
4887                                 ret = tcp_sock_set_keepidle_locked(sk, val);
4888                                 break;
4889                         case TCP_KEEPINTVL:
4890                                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
4891                                         ret = -EINVAL;
4892                                 else
4893                                         tp->keepalive_intvl = val * HZ;
4894                                 break;
4895                         case TCP_KEEPCNT:
4896                                 if (val < 1 || val > MAX_TCP_KEEPCNT)
4897                                         ret = -EINVAL;
4898                                 else
4899                                         tp->keepalive_probes = val;
4900                                 break;
4901                         case TCP_SYNCNT:
4902                                 if (val < 1 || val > MAX_TCP_SYNCNT)
4903                                         ret = -EINVAL;
4904                                 else
4905                                         icsk->icsk_syn_retries = val;
4906                                 break;
4907                         case TCP_USER_TIMEOUT:
4908                                 if (val < 0)
4909                                         ret = -EINVAL;
4910                                 else
4911                                         icsk->icsk_user_timeout = val;
4912                                 break;
4913                         case TCP_NOTSENT_LOWAT:
4914                                 tp->notsent_lowat = val;
4915                                 sk->sk_write_space(sk);
4916                                 break;
4917                         case TCP_WINDOW_CLAMP:
4918                                 ret = tcp_set_window_clamp(sk, val);
4919                                 break;
4920                         default:
4921                                 ret = -EINVAL;
4922                         }
4923                 }
4924 #endif
4925         } else {
4926                 ret = -EINVAL;
4927         }
4928         return ret;
4929 }
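/* Usage sketch (not part of this file): _bpf_setsockopt() backs the
 * bpf_setsockopt() helper. A sockops program could pick a congestion
 * control and initial window per connection; "reno" and iw = 10 are
 * illustrative values, and per the check above TCP_BPF_IW must be set
 * before any data has been sent.
 *
 *	SEC("sockops")
 *	int tune(struct bpf_sock_ops *skops)
 *	{
 *		char cc[] = "reno";
 *		int iw = 10;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
 *			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));
 *			bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW,
 *				       &iw, sizeof(iw));
 *		}
 *		return 1;
 *	}
 */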
4930
4931 static int _bpf_getsockopt(struct sock *sk, int level, int optname,
4932                            char *optval, int optlen)
4933 {
4934         if (!sk_fullsock(sk))
4935                 goto err_clear;
4936
4937         sock_owned_by_me(sk);
4938
4939         if (level == SOL_SOCKET) {
4940                 if (optlen != sizeof(int))
4941                         goto err_clear;
4942
4943                 switch (optname) {
4944                 case SO_MARK:
4945                         *((int *)optval) = sk->sk_mark;
4946                         break;
4947                 case SO_PRIORITY:
4948                         *((int *)optval) = sk->sk_priority;
4949                         break;
4950                 case SO_BINDTOIFINDEX:
4951                         *((int *)optval) = sk->sk_bound_dev_if;
4952                         break;
4953                 default:
4954                         goto err_clear;
4955                 }
4956 #ifdef CONFIG_INET
4957         } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
4958                 struct inet_connection_sock *icsk;
4959                 struct tcp_sock *tp;
4960
4961                 switch (optname) {
4962                 case TCP_CONGESTION:
4963                         icsk = inet_csk(sk);
4964
4965                         if (!icsk->icsk_ca_ops || optlen <= 1)
4966                                 goto err_clear;
4967                         strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4968                         optval[optlen - 1] = 0;
4969                         break;
4970                 case TCP_SAVED_SYN:
4971                         tp = tcp_sk(sk);
4972
4973                         if (optlen <= 0 || !tp->saved_syn ||
4974                             optlen > tcp_saved_syn_len(tp->saved_syn))
4975                                 goto err_clear;
4976                         memcpy(optval, tp->saved_syn->data, optlen);
4977                         break;
4978                 default:
4979                         goto err_clear;
4980                 }
4981         } else if (level == SOL_IP) {
4982                 struct inet_sock *inet = inet_sk(sk);
4983
4984                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4985                         goto err_clear;
4986
4987                 /* Only some options are supported */
4988                 switch (optname) {
4989                 case IP_TOS:
4990                         *((int *)optval) = (int)inet->tos;
4991                         break;
4992                 default:
4993                         goto err_clear;
4994                 }
4995 #if IS_ENABLED(CONFIG_IPV6)
4996         } else if (level == SOL_IPV6) {
4997                 struct ipv6_pinfo *np = inet6_sk(sk);
4998
4999                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
5000                         goto err_clear;
5001
5002                 /* Only some options are supported */
5003                 switch (optname) {
5004                 case IPV6_TCLASS:
5005                         *((int *)optval) = (int)np->tclass;
5006                         break;
5007                 default:
5008                         goto err_clear;
5009                 }
5010 #endif
5011 #endif
5012         } else {
5013                 goto err_clear;
5014         }
5015         return 0;
5016 err_clear:
5017         memset(optval, 0, optlen);
5018         return -EINVAL;
5019 }
5020
5021 BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
5022            int, level, int, optname, char *, optval, int, optlen)
5023 {
5024         return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
5025 }
5026
5027 static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
5028         .func           = bpf_sock_addr_setsockopt,
5029         .gpl_only       = false,
5030         .ret_type       = RET_INTEGER,
5031         .arg1_type      = ARG_PTR_TO_CTX,
5032         .arg2_type      = ARG_ANYTHING,
5033         .arg3_type      = ARG_ANYTHING,
5034         .arg4_type      = ARG_PTR_TO_MEM,
5035         .arg5_type      = ARG_CONST_SIZE,
5036 };
5037
5038 BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
5039            int, level, int, optname, char *, optval, int, optlen)
5040 {
5041         return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
5042 }
5043
5044 static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
5045         .func           = bpf_sock_addr_getsockopt,
5046         .gpl_only       = false,
5047         .ret_type       = RET_INTEGER,
5048         .arg1_type      = ARG_PTR_TO_CTX,
5049         .arg2_type      = ARG_ANYTHING,
5050         .arg3_type      = ARG_ANYTHING,
5051         .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
5052         .arg5_type      = ARG_CONST_SIZE,
5053 };
5054
5055 BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5056            int, level, int, optname, char *, optval, int, optlen)
5057 {
5058         return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
5059 }
5060
5061 static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
5062         .func           = bpf_sock_ops_setsockopt,
5063         .gpl_only       = false,
5064         .ret_type       = RET_INTEGER,
5065         .arg1_type      = ARG_PTR_TO_CTX,
5066         .arg2_type      = ARG_ANYTHING,
5067         .arg3_type      = ARG_ANYTHING,
5068         .arg4_type      = ARG_PTR_TO_MEM,
5069         .arg5_type      = ARG_CONST_SIZE,
5070 };
5071
5072 static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
5073                                 int optname, const u8 **start)
5074 {
5075         struct sk_buff *syn_skb = bpf_sock->syn_skb;
5076         const u8 *hdr_start;
5077         int ret;
5078
5079         if (syn_skb) {
5080                 /* sk is a request_sock here */
5081
5082                 if (optname == TCP_BPF_SYN) {
5083                         hdr_start = syn_skb->data;
5084                         ret = tcp_hdrlen(syn_skb);
5085                 } else if (optname == TCP_BPF_SYN_IP) {
5086                         hdr_start = skb_network_header(syn_skb);
5087                         ret = skb_network_header_len(syn_skb) +
5088                                 tcp_hdrlen(syn_skb);
5089                 } else {
5090                         /* optname == TCP_BPF_SYN_MAC */
5091                         hdr_start = skb_mac_header(syn_skb);
5092                         ret = skb_mac_header_len(syn_skb) +
5093                                 skb_network_header_len(syn_skb) +
5094                                 tcp_hdrlen(syn_skb);
5095                 }
5096         } else {
5097                 struct sock *sk = bpf_sock->sk;
5098                 struct saved_syn *saved_syn;
5099
5100                 if (sk->sk_state == TCP_NEW_SYN_RECV)
5101                         /* This is a synack retransmit, so
5102                          * bpf_sock->syn_skb is not available; fall
5103                          * back to saved_syn (if the SYN was saved).
5104                          */
5105                         saved_syn = inet_reqsk(sk)->saved_syn;
5106                 else
5107                         saved_syn = tcp_sk(sk)->saved_syn;
5108
5109                 if (!saved_syn)
5110                         return -ENOENT;
5111
5112                 if (optname == TCP_BPF_SYN) {
5113                         hdr_start = saved_syn->data +
5114                                 saved_syn->mac_hdrlen +
5115                                 saved_syn->network_hdrlen;
5116                         ret = saved_syn->tcp_hdrlen;
5117                 } else if (optname == TCP_BPF_SYN_IP) {
5118                         hdr_start = saved_syn->data +
5119                                 saved_syn->mac_hdrlen;
5120                         ret = saved_syn->network_hdrlen +
5121                                 saved_syn->tcp_hdrlen;
5122                 } else {
5123                         /* optname == TCP_BPF_SYN_MAC */
5124
5125                         /* TCP_SAVE_SYN may not have saved the mac hdr */
5126                         if (!saved_syn->mac_hdrlen)
5127                                 return -ENOENT;
5128
5129                         hdr_start = saved_syn->data;
5130                         ret = saved_syn->mac_hdrlen +
5131                                 saved_syn->network_hdrlen +
5132                                 saved_syn->tcp_hdrlen;
5133                 }
5134         }
5135
5136         *start = hdr_start;
5137         return ret;
5138 }
5139
5140 BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5141            int, level, int, optname, char *, optval, int, optlen)
5142 {
5143         if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
5144             optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
5145                 int ret, copy_len = 0;
5146                 const u8 *start;
5147
5148                 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
5149                 if (ret > 0) {
5150                         copy_len = ret;
5151                         if (optlen < copy_len) {
5152                                 copy_len = optlen;
5153                                 ret = -ENOSPC;
5154                         }
5155
5156                         memcpy(optval, start, copy_len);
5157                 }
5158
5159                 /* Zero out unused buffer at the end */
5160                 memset(optval + copy_len, 0, optlen - copy_len);
5161
5162                 return ret;
5163         }
5164
5165         return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
5166 }
5167
5168 static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
5169         .func           = bpf_sock_ops_getsockopt,
5170         .gpl_only       = false,
5171         .ret_type       = RET_INTEGER,
5172         .arg1_type      = ARG_PTR_TO_CTX,
5173         .arg2_type      = ARG_ANYTHING,
5174         .arg3_type      = ARG_ANYTHING,
5175         .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
5176         .arg5_type      = ARG_CONST_SIZE,
5177 };
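/* Usage sketch (not part of this file): with the TCP_BPF_SYN* optnames a
 * sockops program can read back the headers of the connection's SYN,
 * taken from bpf_sock->syn_skb or from saved_syn (see the helper
 * above). The 60-byte buffer is an arbitrary choice.
 *
 *	char syn[60];
 *	int ret;
 *
 *	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP,
 *			     syn, sizeof(syn));
 *	// ret > 0: bytes copied; -ENOSPC: buffer too small (a prefix
 *	// is still copied); -ENOENT: nothing saved.
 *
 * Reading the SYN at later callbacks requires TCP_SAVE_SYN to have been
 * set on the listener, e.g. via bpf_setsockopt() at TCP_LISTEN_CB time.
 */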
5178
5179 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
5180            int, argval)
5181 {
5182         struct sock *sk = bpf_sock->sk;
5183         int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
5184
5185         if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
5186                 return -EINVAL;
5187
5188         tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
5189
5190         return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
5191 }
5192
5193 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
5194         .func           = bpf_sock_ops_cb_flags_set,
5195         .gpl_only       = false,
5196         .ret_type       = RET_INTEGER,
5197         .arg1_type      = ARG_PTR_TO_CTX,
5198         .arg2_type      = ARG_ANYTHING,
5199 };
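/* Usage sketch (not part of this file): a sockops program opts into
 * additional callbacks (RTO, retransmit, state change, ...) by setting
 * flag bits; unsupported bits are handed back in the return value.
 *
 *	int unsupported;
 *
 *	unsupported = bpf_sock_ops_cb_flags_set(skops,
 *				BPF_SOCK_OPS_RTO_CB_FLAG |
 *				BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *	// unsupported == 0 means both callbacks were enabled
 */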
5200
5201 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
5202 EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
5203
5204 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
5205            int, addr_len)
5206 {
5207 #ifdef CONFIG_INET
5208         struct sock *sk = ctx->sk;
5209         u32 flags = BIND_FROM_BPF;
5210         int err;
5211
5212         err = -EINVAL;
5213         if (addr_len < offsetofend(struct sockaddr, sa_family))
5214                 return err;
5215         if (addr->sa_family == AF_INET) {
5216                 if (addr_len < sizeof(struct sockaddr_in))
5217                         return err;
5218                 if (((struct sockaddr_in *)addr)->sin_port == htons(0))
5219                         flags |= BIND_FORCE_ADDRESS_NO_PORT;
5220                 return __inet_bind(sk, addr, addr_len, flags);
5221 #if IS_ENABLED(CONFIG_IPV6)
5222         } else if (addr->sa_family == AF_INET6) {
5223                 if (addr_len < SIN6_LEN_RFC2133)
5224                         return err;
5225                 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
5226                         flags |= BIND_FORCE_ADDRESS_NO_PORT;
5227                 /* ipv6_bpf_stub cannot be NULL: this is only reached from
5228                  * the bpf_cgroup_inet6_connect hook, so ipv6 is loaded.
5229                  */
5230                 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
5231 #endif /* CONFIG_IPV6 */
5232         }
5233 #endif /* CONFIG_INET */
5234
5235         return -EAFNOSUPPORT;
5236 }
5237
5238 static const struct bpf_func_proto bpf_bind_proto = {
5239         .func           = bpf_bind,
5240         .gpl_only       = false,
5241         .ret_type       = RET_INTEGER,
5242         .arg1_type      = ARG_PTR_TO_CTX,
5243         .arg2_type      = ARG_PTR_TO_MEM,
5244         .arg3_type      = ARG_CONST_SIZE,
5245 };
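/* Usage sketch (not part of this file): bpf_bind() lets a cgroup connect
 * hook force a source address before the connect proceeds. The address
 * below is hypothetical; leaving sin_port at 0 triggers
 * BIND_FORCE_ADDRESS_NO_PORT above, deferring port selection.
 *
 *	SEC("cgroup/connect4")
 *	int pick_src(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001), // 10.0.0.1
 *		};
 *
 *		bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
 *		return 1;	// allow the connect
 *	}
 */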
5246
5247 #ifdef CONFIG_XFRM
5248 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
5249            struct bpf_xfrm_state *, to, u32, size, u64, flags)
5250 {
5251         const struct sec_path *sp = skb_sec_path(skb);
5252         const struct xfrm_state *x;
5253
5254         if (!sp || unlikely(index >= sp->len || flags))
5255                 goto err_clear;
5256
5257         x = sp->xvec[index];
5258
5259         if (unlikely(size != sizeof(struct bpf_xfrm_state)))
5260                 goto err_clear;
5261
5262         to->reqid = x->props.reqid;
5263         to->spi = x->id.spi;
5264         to->family = x->props.family;
5265         to->ext = 0;
5266
5267         if (to->family == AF_INET6) {
5268                 memcpy(to->remote_ipv6, x->props.saddr.a6,
5269                        sizeof(to->remote_ipv6));
5270         } else {
5271                 to->remote_ipv4 = x->props.saddr.a4;
5272                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
5273         }
5274
5275         return 0;
5276 err_clear:
5277         memset(to, 0, size);
5278         return -EINVAL;
5279 }
5280
5281 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
5282         .func           = bpf_skb_get_xfrm_state,
5283         .gpl_only       = false,
5284         .ret_type       = RET_INTEGER,
5285         .arg1_type      = ARG_PTR_TO_CTX,
5286         .arg2_type      = ARG_ANYTHING,
5287         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
5288         .arg4_type      = ARG_CONST_SIZE,
5289         .arg5_type      = ARG_ANYTHING,
5290 };
5291 #endif
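/* Usage sketch (not part of this file): a tc/BPF program can correlate
 * decrypted traffic with its IPsec state, e.g. marking flows from a
 * particular reqid (the value 1 is arbitrary):
 *
 *	struct bpf_xfrm_state xs;
 *
 *	if (!bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0) &&
 *	    xs.reqid == 1)
 *		skb->mark = 0x1;	// hypothetical policy mark
 */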
5292
5293 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
5294 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
5295                                   const struct neighbour *neigh,
5296                                   const struct net_device *dev)
5297 {
5298         memcpy(params->dmac, neigh->ha, ETH_ALEN);
5299         memcpy(params->smac, dev->dev_addr, ETH_ALEN);
5300         params->h_vlan_TCI = 0;
5301         params->h_vlan_proto = 0;
5302
5303         return 0;
5304 }
5305 #endif
5306
5307 #if IS_ENABLED(CONFIG_INET)
5308 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
5309                                u32 flags, bool check_mtu)
5310 {
5311         struct fib_nh_common *nhc;
5312         struct in_device *in_dev;
5313         struct neighbour *neigh;
5314         struct net_device *dev;
5315         struct fib_result res;
5316         struct flowi4 fl4;
5317         int err;
5318         u32 mtu;
5319
5320         dev = dev_get_by_index_rcu(net, params->ifindex);
5321         if (unlikely(!dev))
5322                 return -ENODEV;
5323
5324         /* verify forwarding is enabled on this interface */
5325         in_dev = __in_dev_get_rcu(dev);
5326         if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
5327                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
5328
5329         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5330                 fl4.flowi4_iif = 1;
5331                 fl4.flowi4_oif = params->ifindex;
5332         } else {
5333                 fl4.flowi4_iif = params->ifindex;
5334                 fl4.flowi4_oif = 0;
5335         }
5336         fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
5337         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
5338         fl4.flowi4_flags = 0;
5339
5340         fl4.flowi4_proto = params->l4_protocol;
5341         fl4.daddr = params->ipv4_dst;
5342         fl4.saddr = params->ipv4_src;
5343         fl4.fl4_sport = params->sport;
5344         fl4.fl4_dport = params->dport;
5345         fl4.flowi4_multipath_hash = 0;
5346
5347         if (flags & BPF_FIB_LOOKUP_DIRECT) {
5348                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5349                 struct fib_table *tb;
5350
5351                 tb = fib_get_table(net, tbid);
5352                 if (unlikely(!tb))
5353                         return BPF_FIB_LKUP_RET_NOT_FWDED;
5354
5355                 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
5356         } else {
5357                 fl4.flowi4_mark = 0;
5358                 fl4.flowi4_secid = 0;
5359                 fl4.flowi4_tun_key.tun_id = 0;
5360                 fl4.flowi4_uid = sock_net_uid(net, NULL);
5361
5362                 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
5363         }
5364
5365         if (err) {
5366                 /* map fib lookup errors to RTN_ type */
5367                 if (err == -EINVAL)
5368                         return BPF_FIB_LKUP_RET_BLACKHOLE;
5369                 if (err == -EHOSTUNREACH)
5370                         return BPF_FIB_LKUP_RET_UNREACHABLE;
5371                 if (err == -EACCES)
5372                         return BPF_FIB_LKUP_RET_PROHIBIT;
5373
5374                 return BPF_FIB_LKUP_RET_NOT_FWDED;
5375         }
5376
5377         if (res.type != RTN_UNICAST)
5378                 return BPF_FIB_LKUP_RET_NOT_FWDED;
5379
5380         if (fib_info_num_path(res.fi) > 1)
5381                 fib_select_path(net, &res, &fl4, NULL);
5382
5383         if (check_mtu) {
5384                 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
5385                 if (params->tot_len > mtu)
5386                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
5387         }
5388
5389         nhc = res.nhc;
5390
5391         /* do not handle lwt encaps right now */
5392         if (nhc->nhc_lwtstate)
5393                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
5394
5395         dev = nhc->nhc_dev;
5396
5397         params->rt_metric = res.fi->fib_priority;
5398         params->ifindex = dev->ifindex;
5399
5400         /* xdp and cls_bpf programs are run in RCU-bh so
5401          * rcu_read_lock_bh is not needed here
5402          */
5403         if (likely(nhc->nhc_gw_family != AF_INET6)) {
5404                 if (nhc->nhc_gw_family)
5405                         params->ipv4_dst = nhc->nhc_gw.ipv4;
5406
5407                 neigh = __ipv4_neigh_lookup_noref(dev,
5408                                                  (__force u32)params->ipv4_dst);
5409         } else {
5410                 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
5411
5412                 params->family = AF_INET6;
5413                 *dst = nhc->nhc_gw.ipv6;
5414                 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
5415         }
5416
5417         if (!neigh)
5418                 return BPF_FIB_LKUP_RET_NO_NEIGH;
5419
5420         return bpf_fib_set_fwd_params(params, neigh, dev);
5421 }
5422 #endif
5423
5424 #if IS_ENABLED(CONFIG_IPV6)
5425 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
5426                                u32 flags, bool check_mtu)
5427 {
5428         struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
5429         struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
5430         struct fib6_result res = {};
5431         struct neighbour *neigh;
5432         struct net_device *dev;
5433         struct inet6_dev *idev;
5434         struct flowi6 fl6;
5435         int strict = 0;
5436         int oif, err;
5437         u32 mtu;
5438
5439         /* link local addresses are never forwarded */
5440         if (rt6_need_strict(dst) || rt6_need_strict(src))
5441                 return BPF_FIB_LKUP_RET_NOT_FWDED;
5442
5443         dev = dev_get_by_index_rcu(net, params->ifindex);
5444         if (unlikely(!dev))
5445                 return -ENODEV;
5446
5447         idev = __in6_dev_get_safely(dev);
5448         if (unlikely(!idev || !idev->cnf.forwarding))
5449                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
5450
5451         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5452                 fl6.flowi6_iif = 1;
5453                 oif = fl6.flowi6_oif = params->ifindex;
5454         } else {
5455                 oif = fl6.flowi6_iif = params->ifindex;
5456                 fl6.flowi6_oif = 0;
5457                 strict = RT6_LOOKUP_F_HAS_SADDR;
5458         }
5459         fl6.flowlabel = params->flowinfo;
5460         fl6.flowi6_scope = 0;
5461         fl6.flowi6_flags = 0;
5462         fl6.mp_hash = 0;
5463
5464         fl6.flowi6_proto = params->l4_protocol;
5465         fl6.daddr = *dst;
5466         fl6.saddr = *src;
5467         fl6.fl6_sport = params->sport;
5468         fl6.fl6_dport = params->dport;
5469
5470         if (flags & BPF_FIB_LOOKUP_DIRECT) {
5471                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5472                 struct fib6_table *tb;
5473
5474                 tb = ipv6_stub->fib6_get_table(net, tbid);
5475                 if (unlikely(!tb))
5476                         return BPF_FIB_LKUP_RET_NOT_FWDED;
5477
5478                 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
5479                                                    strict);
5480         } else {
5481                 fl6.flowi6_mark = 0;
5482                 fl6.flowi6_secid = 0;
5483                 fl6.flowi6_tun_key.tun_id = 0;
5484                 fl6.flowi6_uid = sock_net_uid(net, NULL);
5485
5486                 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
5487         }
5488
5489         if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
5490                      res.f6i == net->ipv6.fib6_null_entry))
5491                 return BPF_FIB_LKUP_RET_NOT_FWDED;
5492
5493         switch (res.fib6_type) {
5494         /* only unicast is forwarded */
5495         case RTN_UNICAST:
5496                 break;
5497         case RTN_BLACKHOLE:
5498                 return BPF_FIB_LKUP_RET_BLACKHOLE;
5499         case RTN_UNREACHABLE:
5500                 return BPF_FIB_LKUP_RET_UNREACHABLE;
5501         case RTN_PROHIBIT:
5502                 return BPF_FIB_LKUP_RET_PROHIBIT;
5503         default:
5504                 return BPF_FIB_LKUP_RET_NOT_FWDED;
5505         }
5506
5507         ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
5508                                     fl6.flowi6_oif != 0, NULL, strict);
5509
5510         if (check_mtu) {
5511                 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
5512                 if (params->tot_len > mtu)
5513                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
5514         }
5515
5516         if (res.nh->fib_nh_lws)
5517                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
5518
5519         if (res.nh->fib_nh_gw_family)
5520                 *dst = res.nh->fib_nh_gw6;
5521
5522         dev = res.nh->fib_nh_dev;
5523         params->rt_metric = res.f6i->fib6_metric;
5524         params->ifindex = dev->ifindex;
5525
5526         /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
5527          * not needed here.
5528          */
5529         neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
5530         if (!neigh)
5531                 return BPF_FIB_LKUP_RET_NO_NEIGH;
5532
5533         return bpf_fib_set_fwd_params(params, neigh, dev);
5534 }
5535 #endif
5536
5537 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
5538            struct bpf_fib_lookup *, params, int, plen, u32, flags)
5539 {
5540         if (plen < sizeof(*params))
5541                 return -EINVAL;
5542
5543         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5544                 return -EINVAL;
5545
5546         switch (params->family) {
5547 #if IS_ENABLED(CONFIG_INET)
5548         case AF_INET:
5549                 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
5550                                            flags, true);
5551 #endif
5552 #if IS_ENABLED(CONFIG_IPV6)
5553         case AF_INET6:
5554                 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
5555                                            flags, true);
5556 #endif
5557         }
5558         return -EAFNOSUPPORT;
5559 }
5560
5561 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
5562         .func           = bpf_xdp_fib_lookup,
5563         .gpl_only       = true,
5564         .ret_type       = RET_INTEGER,
5565         .arg1_type      = ARG_PTR_TO_CTX,
5566         .arg2_type      = ARG_PTR_TO_MEM,
5567         .arg3_type      = ARG_CONST_SIZE,
5568         .arg4_type      = ARG_ANYTHING,
5569 };
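/* Usage sketch (not part of this file): the canonical use is an XDP
 * forwarder in the spirit of samples/bpf/xdp_fwd_kern.c: fill the params
 * from the packet, then on success rewrite the MACs and redirect.
 * iph/eth are assumed to be bounds-checked pointers into the packet.
 *
 *	struct bpf_fib_lookup fib = {};
 *
 *	fib.family	= AF_INET;
 *	fib.tos		= iph->tos;
 *	fib.l4_protocol	= iph->protocol;
 *	fib.tot_len	= bpf_ntohs(iph->tot_len);
 *	fib.ipv4_src	= iph->saddr;
 *	fib.ipv4_dst	= iph->daddr;
 *	fib.ifindex	= ctx->ingress_ifindex;
 *
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS) {
 *		__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *		__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 *	return XDP_PASS;
 */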
5570
5571 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
5572            struct bpf_fib_lookup *, params, int, plen, u32, flags)
5573 {
5574         struct net *net = dev_net(skb->dev);
5575         int rc = -EAFNOSUPPORT;
5576
5577         if (plen < sizeof(*params))
5578                 return -EINVAL;
5579
5580         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5581                 return -EINVAL;
5582
5583         switch (params->family) {
5584 #if IS_ENABLED(CONFIG_INET)
5585         case AF_INET:
5586                 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
5587                 break;
5588 #endif
5589 #if IS_ENABLED(CONFIG_IPV6)
5590         case AF_INET6:
5591                 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
5592                 break;
5593 #endif
5594         }
5595
5596         if (!rc) {
5597                 struct net_device *dev;
5598
5599                 dev = dev_get_by_index_rcu(net, params->ifindex);
5600                 if (!dev || !is_skb_forwardable(dev, skb))
5601                         rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
5602         }
5603
5604         return rc;
5605 }
5606
5607 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
5608         .func           = bpf_skb_fib_lookup,
5609         .gpl_only       = true,
5610         .ret_type       = RET_INTEGER,
5611         .arg1_type      = ARG_PTR_TO_CTX,
5612         .arg2_type      = ARG_PTR_TO_MEM,
5613         .arg3_type      = ARG_CONST_SIZE,
5614         .arg4_type      = ARG_ANYTHING,
5615 };
5616
5617 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5618 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
5619 {
5620         int err;
5621         struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
5622
5623         if (!seg6_validate_srh(srh, len, false))
5624                 return -EINVAL;
5625
5626         switch (type) {
5627         case BPF_LWT_ENCAP_SEG6_INLINE:
5628                 if (skb->protocol != htons(ETH_P_IPV6))
5629                         return -EBADMSG;
5630
5631                 err = seg6_do_srh_inline(skb, srh);
5632                 break;
5633         case BPF_LWT_ENCAP_SEG6:
5634                 skb_reset_inner_headers(skb);
5635                 skb->encapsulation = 1;
5636                 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
5637                 break;
5638         default:
5639                 return -EINVAL;
5640         }
5641
5642         bpf_compute_data_pointers(skb);
5643         if (err)
5644                 return err;
5645
5646         ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5647         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
5648
5649         return seg6_lookup_nexthop(skb, NULL, 0);
5650 }
5651 #endif /* CONFIG_IPV6_SEG6_BPF */
5652
5653 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5654 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
5655                              bool ingress)
5656 {
5657         return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
5658 }
5659 #endif
5660
5661 BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
5662            u32, len)
5663 {
5664         switch (type) {
5665 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5666         case BPF_LWT_ENCAP_SEG6:
5667         case BPF_LWT_ENCAP_SEG6_INLINE:
5668                 return bpf_push_seg6_encap(skb, type, hdr, len);
5669 #endif
5670 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5671         case BPF_LWT_ENCAP_IP:
5672                 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
5673 #endif
5674         default:
5675                 return -EINVAL;
5676         }
5677 }
5678
5679 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
5680            void *, hdr, u32, len)
5681 {
5682         switch (type) {
5683 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5684         case BPF_LWT_ENCAP_IP:
5685                 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
5686 #endif
5687         default:
5688                 return -EINVAL;
5689         }
5690 }
5691
5692 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
5693         .func           = bpf_lwt_in_push_encap,
5694         .gpl_only       = false,
5695         .ret_type       = RET_INTEGER,
5696         .arg1_type      = ARG_PTR_TO_CTX,
5697         .arg2_type      = ARG_ANYTHING,
5698         .arg3_type      = ARG_PTR_TO_MEM,
5699         .arg4_type      = ARG_CONST_SIZE
5700 };
5701
5702 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5703         .func           = bpf_lwt_xmit_push_encap,
5704         .gpl_only       = false,
5705         .ret_type       = RET_INTEGER,
5706         .arg1_type      = ARG_PTR_TO_CTX,
5707         .arg2_type      = ARG_ANYTHING,
5708         .arg3_type      = ARG_PTR_TO_MEM,
5709         .arg4_type      = ARG_CONST_SIZE
5710 };
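/* Usage sketch (not part of this file): a BPF_PROG_TYPE_LWT_IN program
 * attached via a "bpf" route can push a pre-built SRv6 header in front
 * of the packet; building a valid struct ipv6_sr_hdr of srh_len bytes
 * is elided here.
 *
 *	SEC("lwt_in")
 *	int do_encap(struct __sk_buff *skb)
 *	{
 *		// srh points at a complete, validated SRH of srh_len bytes
 *		if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6,
 *				       srh, srh_len))
 *			return BPF_DROP;
 *		return BPF_OK;
 *	}
 *
 * BPF_LWT_ENCAP_IP similarly prepends an IPv4/IPv6 header at ingress,
 * or at egress via the xmit variant, when CONFIG_LWTUNNEL_BPF is set.
 */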
5711
5712 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5713 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5714            const void *, from, u32, len)
5715 {
5716         struct seg6_bpf_srh_state *srh_state =
5717                 this_cpu_ptr(&seg6_bpf_srh_states);
5718         struct ipv6_sr_hdr *srh = srh_state->srh;
5719         void *srh_tlvs, *srh_end, *ptr;
5720         int srhoff = 0;
5721
5722         if (srh == NULL)
5723                 return -EINVAL;
5724
5725         srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5726         srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5727
5728         ptr = skb->data + offset;
5729         if (ptr >= srh_tlvs && ptr + len <= srh_end)
5730                 srh_state->valid = false;
5731         else if (ptr < (void *)&srh->flags ||
5732                  ptr + len > (void *)&srh->segments)
5733                 return -EFAULT;
5734
5735         if (unlikely(bpf_try_make_writable(skb, offset + len)))
5736                 return -EFAULT;
5737         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5738                 return -EINVAL;
5739         srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5740
5741         memcpy(skb->data + offset, from, len);
5742         return 0;
5743 }
5744
5745 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5746         .func           = bpf_lwt_seg6_store_bytes,
5747         .gpl_only       = false,
5748         .ret_type       = RET_INTEGER,
5749         .arg1_type      = ARG_PTR_TO_CTX,
5750         .arg2_type      = ARG_ANYTHING,
5751         .arg3_type      = ARG_PTR_TO_MEM,
5752         .arg4_type      = ARG_CONST_SIZE
5753 };
5754
5755 static void bpf_update_srh_state(struct sk_buff *skb)
5756 {
5757         struct seg6_bpf_srh_state *srh_state =
5758                 this_cpu_ptr(&seg6_bpf_srh_states);
5759         int srhoff = 0;
5760
5761         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5762                 srh_state->srh = NULL;
5763         } else {
5764                 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5765                 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5766                 srh_state->valid = true;
5767         }
5768 }
5769
5770 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5771            u32, action, void *, param, u32, param_len)
5772 {
5773         struct seg6_bpf_srh_state *srh_state =
5774                 this_cpu_ptr(&seg6_bpf_srh_states);
5775         int hdroff = 0;
5776         int err;
5777
5778         switch (action) {
5779         case SEG6_LOCAL_ACTION_END_X:
5780                 if (!seg6_bpf_has_valid_srh(skb))
5781                         return -EBADMSG;
5782                 if (param_len != sizeof(struct in6_addr))
5783                         return -EINVAL;
5784                 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5785         case SEG6_LOCAL_ACTION_END_T:
5786                 if (!seg6_bpf_has_valid_srh(skb))
5787                         return -EBADMSG;
5788                 if (param_len != sizeof(int))
5789                         return -EINVAL;
5790                 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5791         case SEG6_LOCAL_ACTION_END_DT6:
5792                 if (!seg6_bpf_has_valid_srh(skb))
5793                         return -EBADMSG;
5794                 if (param_len != sizeof(int))
5795                         return -EINVAL;
5796
5797                 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5798                         return -EBADMSG;
5799                 if (!pskb_pull(skb, hdroff))
5800                         return -EBADMSG;
5801
5802                 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5803                 skb_reset_network_header(skb);
5804                 skb_reset_transport_header(skb);
5805                 skb->encapsulation = 0;
5806
5807                 bpf_compute_data_pointers(skb);
5808                 bpf_update_srh_state(skb);
5809                 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5810         case SEG6_LOCAL_ACTION_END_B6:
5811                 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5812                         return -EBADMSG;
5813                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5814                                           param, param_len);
5815                 if (!err)
5816                         bpf_update_srh_state(skb);
5817
5818                 return err;
5819         case SEG6_LOCAL_ACTION_END_B6_ENCAP:
5820                 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5821                         return -EBADMSG;
5822                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5823                                           param, param_len);
5824                 if (!err)
5825                         bpf_update_srh_state(skb);
5826
5827                 return err;
5828         default:
5829                 return -EINVAL;
5830         }
5831 }
5832
5833 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5834         .func           = bpf_lwt_seg6_action,
5835         .gpl_only       = false,
5836         .ret_type       = RET_INTEGER,
5837         .arg1_type      = ARG_PTR_TO_CTX,
5838         .arg2_type      = ARG_ANYTHING,
5839         .arg3_type      = ARG_PTR_TO_MEM,
5840         .arg4_type      = ARG_CONST_SIZE
5841 };
5842
5843 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5844            s32, len)
5845 {
5846         struct seg6_bpf_srh_state *srh_state =
5847                 this_cpu_ptr(&seg6_bpf_srh_states);
5848         struct ipv6_sr_hdr *srh = srh_state->srh;
5849         void *srh_end, *srh_tlvs, *ptr;
5850         struct ipv6hdr *hdr;
5851         int srhoff = 0;
5852         int ret;
5853
5854         if (unlikely(srh == NULL))
5855                 return -EINVAL;
5856
5857         srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5858                         ((srh->first_segment + 1) << 4));
5859         srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5860                         srh_state->hdrlen);
5861         ptr = skb->data + offset;
5862
5863         if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5864                 return -EFAULT;
5865         if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5866                 return -EFAULT;
5867
5868         if (len > 0) {
5869                 ret = skb_cow_head(skb, len);
5870                 if (unlikely(ret < 0))
5871                         return ret;
5872
5873                 ret = bpf_skb_net_hdr_push(skb, offset, len);
5874         } else {
5875                 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5876         }
5877
5878         bpf_compute_data_pointers(skb);
5879         if (unlikely(ret < 0))
5880                 return ret;
5881
5882         hdr = (struct ipv6hdr *)skb->data;
5883         hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5884
5885         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5886                 return -EINVAL;
5887         srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5888         srh_state->hdrlen += len;
5889         srh_state->valid = false;
5890         return 0;
5891 }
5892
5893 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5894         .func           = bpf_lwt_seg6_adjust_srh,
5895         .gpl_only       = false,
5896         .ret_type       = RET_INTEGER,
5897         .arg1_type      = ARG_PTR_TO_CTX,
5898         .arg2_type      = ARG_ANYTHING,
5899         .arg3_type      = ARG_ANYTHING,
5900 };
5901 #endif /* CONFIG_IPV6_SEG6_BPF */
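/*
 * Editor's sketch (not part of this file): a minimal lwt_seg6local BPF
 * program driving bpf_lwt_seg6_action() above. It assumes libbpf's
 * <bpf/bpf_helpers.h>; the fc00::1 nexthop is made up.
 */
#include <linux/bpf.h>
#include <linux/seg6_local.h>
#include <linux/in6.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_seg6local")
int do_end_x(struct __sk_buff *skb)
{
        struct in6_addr nh = {};

        /* End.X: param must be a struct in6_addr, which is what the
         * param_len check in bpf_lwt_seg6_action() enforces.
         */
        nh.s6_addr[0] = 0xfc;   /* fc00::1, an illustrative nexthop */
        nh.s6_addr[15] = 1;

        if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
                                &nh, sizeof(nh)) < 0)
                return BPF_DROP;
        return BPF_REDIRECT;    /* routed by seg6_lookup_nexthop() */
}

char _license[] SEC("license") = "GPL";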
5902
5903 #ifdef CONFIG_INET
5904 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
5905                               int dif, int sdif, u8 family, u8 proto)
5906 {
5907         bool refcounted = false;
5908         struct sock *sk = NULL;
5909
5910         if (family == AF_INET) {
5911                 __be32 src4 = tuple->ipv4.saddr;
5912                 __be32 dst4 = tuple->ipv4.daddr;
5913
5914                 if (proto == IPPROTO_TCP)
5915                         sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
5916                                            src4, tuple->ipv4.sport,
5917                                            dst4, tuple->ipv4.dport,
5918                                            dif, sdif, &refcounted);
5919                 else
5920                         sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5921                                                dst4, tuple->ipv4.dport,
5922                                                dif, sdif, &udp_table, NULL);
5923 #if IS_ENABLED(CONFIG_IPV6)
5924         } else {
5925                 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5926                 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
5927
5928                 if (proto == IPPROTO_TCP)
5929                         sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
5930                                             src6, tuple->ipv6.sport,
5931                                             dst6, ntohs(tuple->ipv6.dport),
5932                                             dif, sdif, &refcounted);
5933                 else if (likely(ipv6_bpf_stub))
5934                         sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5935                                                             src6, tuple->ipv6.sport,
5936                                                             dst6, tuple->ipv6.dport,
5937                                                             dif, sdif,
5938                                                             &udp_table, NULL);
5939 #endif
5940         }
5941
5942         if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5943                 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5944                 sk = NULL;
5945         }
5946         return sk;
5947 }
5948
5949 /* bpf_skc_lookup performs the core lookup for different types of sockets,
5950  * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
5951  * The BPF_CALL wrappers cast the returned socket to an 'unsigned long'
5952  * to satisfy their declarations.
5953  */
5954 static struct sock *
5955 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5956                  struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5957                  u64 flags)
5958 {
5959         struct sock *sk = NULL;
5960         u8 family = AF_UNSPEC;
5961         struct net *net;
5962         int sdif;
5963
5964         if (len == sizeof(tuple->ipv4))
5965                 family = AF_INET;
5966         else if (len == sizeof(tuple->ipv6))
5967                 family = AF_INET6;
5968         else
5969                 return NULL;
5970
5971         if (unlikely(family == AF_UNSPEC || flags ||
5972                      !((s32)netns_id < 0 || netns_id <= S32_MAX)))
5973                 goto out;
5974
5975         if (family == AF_INET)
5976                 sdif = inet_sdif(skb);
5977         else
5978                 sdif = inet6_sdif(skb);
5979
5980         if ((s32)netns_id < 0) {
5981                 net = caller_net;
5982                 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5983         } else {
5984                 net = get_net_ns_by_id(caller_net, netns_id);
5985                 if (unlikely(!net))
5986                         goto out;
5987                 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5988                 put_net(net);
5989         }
5990
5991 out:
5992         return sk;
5993 }
5994
5995 static struct sock *
5996 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5997                 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5998                 u64 flags)
5999 {
6000         struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
6001                                            ifindex, proto, netns_id, flags);
6002
6003         if (sk) {
6004                 sk = sk_to_full_sk(sk);
6005                 if (!sk_fullsock(sk)) {
6006                         sock_gen_put(sk);
6007                         return NULL;
6008                 }
6009         }
6010
6011         return sk;
6012 }
6013
6014 static struct sock *
6015 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6016                u8 proto, u64 netns_id, u64 flags)
6017 {
6018         struct net *caller_net;
6019         int ifindex;
6020
6021         if (skb->dev) {
6022                 caller_net = dev_net(skb->dev);
6023                 ifindex = skb->dev->ifindex;
6024         } else {
6025                 caller_net = sock_net(skb->sk);
6026                 ifindex = 0;
6027         }
6028
6029         return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
6030                                 netns_id, flags);
6031 }
6032
6033 static struct sock *
6034 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6035               u8 proto, u64 netns_id, u64 flags)
6036 {
6037         struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
6038                                          flags);
6039
6040         if (sk) {
6041                 sk = sk_to_full_sk(sk);
6042                 if (!sk_fullsock(sk)) {
6043                         sock_gen_put(sk);
6044                         return NULL;
6045                 }
6046         }
6047
6048         return sk;
6049 }
6050
6051 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
6052            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6053 {
6054         return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
6055                                              netns_id, flags);
6056 }
6057
6058 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
6059         .func           = bpf_skc_lookup_tcp,
6060         .gpl_only       = false,
6061         .pkt_access     = true,
6062         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6063         .arg1_type      = ARG_PTR_TO_CTX,
6064         .arg2_type      = ARG_PTR_TO_MEM,
6065         .arg3_type      = ARG_CONST_SIZE,
6066         .arg4_type      = ARG_ANYTHING,
6067         .arg5_type      = ARG_ANYTHING,
6068 };
6069
6070 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
6071            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6072 {
6073         return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
6074                                             netns_id, flags);
6075 }
6076
6077 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
6078         .func           = bpf_sk_lookup_tcp,
6079         .gpl_only       = false,
6080         .pkt_access     = true,
6081         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6082         .arg1_type      = ARG_PTR_TO_CTX,
6083         .arg2_type      = ARG_PTR_TO_MEM,
6084         .arg3_type      = ARG_CONST_SIZE,
6085         .arg4_type      = ARG_ANYTHING,
6086         .arg5_type      = ARG_ANYTHING,
6087 };
6088
6089 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
6090            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6091 {
6092         return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
6093                                             netns_id, flags);
6094 }
6095
6096 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
6097         .func           = bpf_sk_lookup_udp,
6098         .gpl_only       = false,
6099         .pkt_access     = true,
6100         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6101         .arg1_type      = ARG_PTR_TO_CTX,
6102         .arg2_type      = ARG_PTR_TO_MEM,
6103         .arg3_type      = ARG_CONST_SIZE,
6104         .arg4_type      = ARG_ANYTHING,
6105         .arg5_type      = ARG_ANYTHING,
6106 };
6107
6108 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
6109 {
6110         if (sk && sk_is_refcounted(sk))
6111                 sock_gen_put(sk);
6112         return 0;
6113 }
6114
6115 static const struct bpf_func_proto bpf_sk_release_proto = {
6116         .func           = bpf_sk_release,
6117         .gpl_only       = false,
6118         .ret_type       = RET_INTEGER,
6119         .arg1_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6120 };
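/*
 * Editor's sketch (not part of this file): the lookup/release pairing the
 * helpers above require, as a tc classifier built with libbpf's
 * <bpf/bpf_helpers.h>. The tuple size argument selects the address family
 * in __bpf_skc_lookup(), and BPF_F_CURRENT_NETNS (-1) keeps the lookup in
 * the caller's netns. Addresses and ports are made up.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int lookup_demo(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {
                .ipv4.saddr = bpf_htonl(0x7f000001),    /* 127.0.0.1 */
                .ipv4.daddr = bpf_htonl(0x7f000001),
                .ipv4.sport = bpf_htons(12345),
                .ipv4.dport = bpf_htons(80),
        };
        struct bpf_sock *sk;

        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);     /* every acquired ref must be released */
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";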
6121
6122 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
6123            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6124 {
6125         struct net *caller_net = dev_net(ctx->rxq->dev);
6126         int ifindex = ctx->rxq->dev->ifindex;
6127
6128         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
6129                                               ifindex, IPPROTO_UDP, netns_id,
6130                                               flags);
6131 }
6132
6133 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
6134         .func           = bpf_xdp_sk_lookup_udp,
6135         .gpl_only       = false,
6136         .pkt_access     = true,
6137         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6138         .arg1_type      = ARG_PTR_TO_CTX,
6139         .arg2_type      = ARG_PTR_TO_MEM,
6140         .arg3_type      = ARG_CONST_SIZE,
6141         .arg4_type      = ARG_ANYTHING,
6142         .arg5_type      = ARG_ANYTHING,
6143 };
6144
6145 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
6146            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6147 {
6148         struct net *caller_net = dev_net(ctx->rxq->dev);
6149         int ifindex = ctx->rxq->dev->ifindex;
6150
6151         return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
6152                                                ifindex, IPPROTO_TCP, netns_id,
6153                                                flags);
6154 }
6155
6156 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
6157         .func           = bpf_xdp_skc_lookup_tcp,
6158         .gpl_only       = false,
6159         .pkt_access     = true,
6160         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6161         .arg1_type      = ARG_PTR_TO_CTX,
6162         .arg2_type      = ARG_PTR_TO_MEM,
6163         .arg3_type      = ARG_CONST_SIZE,
6164         .arg4_type      = ARG_ANYTHING,
6165         .arg5_type      = ARG_ANYTHING,
6166 };
6167
6168 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
6169            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6170 {
6171         struct net *caller_net = dev_net(ctx->rxq->dev);
6172         int ifindex = ctx->rxq->dev->ifindex;
6173
6174         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
6175                                               ifindex, IPPROTO_TCP, netns_id,
6176                                               flags);
6177 }
6178
6179 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
6180         .func           = bpf_xdp_sk_lookup_tcp,
6181         .gpl_only       = false,
6182         .pkt_access     = true,
6183         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6184         .arg1_type      = ARG_PTR_TO_CTX,
6185         .arg2_type      = ARG_PTR_TO_MEM,
6186         .arg3_type      = ARG_CONST_SIZE,
6187         .arg4_type      = ARG_ANYTHING,
6188         .arg5_type      = ARG_ANYTHING,
6189 };
6190
6191 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6192            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6193 {
6194         return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
6195                                                sock_net(ctx->sk), 0,
6196                                                IPPROTO_TCP, netns_id, flags);
6197 }
6198
6199 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
6200         .func           = bpf_sock_addr_skc_lookup_tcp,
6201         .gpl_only       = false,
6202         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6203         .arg1_type      = ARG_PTR_TO_CTX,
6204         .arg2_type      = ARG_PTR_TO_MEM,
6205         .arg3_type      = ARG_CONST_SIZE,
6206         .arg4_type      = ARG_ANYTHING,
6207         .arg5_type      = ARG_ANYTHING,
6208 };
6209
6210 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6211            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6212 {
6213         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6214                                               sock_net(ctx->sk), 0, IPPROTO_TCP,
6215                                               netns_id, flags);
6216 }
6217
6218 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
6219         .func           = bpf_sock_addr_sk_lookup_tcp,
6220         .gpl_only       = false,
6221         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6222         .arg1_type      = ARG_PTR_TO_CTX,
6223         .arg2_type      = ARG_PTR_TO_MEM,
6224         .arg3_type      = ARG_CONST_SIZE,
6225         .arg4_type      = ARG_ANYTHING,
6226         .arg5_type      = ARG_ANYTHING,
6227 };
6228
6229 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
6230            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6231 {
6232         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6233                                               sock_net(ctx->sk), 0, IPPROTO_UDP,
6234                                               netns_id, flags);
6235 }
6236
6237 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
6238         .func           = bpf_sock_addr_sk_lookup_udp,
6239         .gpl_only       = false,
6240         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6241         .arg1_type      = ARG_PTR_TO_CTX,
6242         .arg2_type      = ARG_PTR_TO_MEM,
6243         .arg3_type      = ARG_CONST_SIZE,
6244         .arg4_type      = ARG_ANYTHING,
6245         .arg5_type      = ARG_ANYTHING,
6246 };
6247
6248 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6249                                   struct bpf_insn_access_aux *info)
6250 {
6251         if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
6252                                           icsk_retransmits))
6253                 return false;
6254
6255         if (off % size != 0)
6256                 return false;
6257
6258         switch (off) {
6259         case offsetof(struct bpf_tcp_sock, bytes_received):
6260         case offsetof(struct bpf_tcp_sock, bytes_acked):
6261                 return size == sizeof(__u64);
6262         default:
6263                 return size == sizeof(__u32);
6264         }
6265 }
6266
6267 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
6268                                     const struct bpf_insn *si,
6269                                     struct bpf_insn *insn_buf,
6270                                     struct bpf_prog *prog, u32 *target_size)
6271 {
6272         struct bpf_insn *insn = insn_buf;
6273
6274 #define BPF_TCP_SOCK_GET_COMMON(FIELD)                                  \
6275         do {                                                            \
6276                 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) >     \
6277                              sizeof_field(struct bpf_tcp_sock, FIELD)); \
6278                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
6279                                       si->dst_reg, si->src_reg,         \
6280                                       offsetof(struct tcp_sock, FIELD)); \
6281         } while (0)
6282
6283 #define BPF_INET_SOCK_GET_COMMON(FIELD)                                 \
6284         do {                                                            \
6285                 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock,  \
6286                                           FIELD) >                      \
6287                              sizeof_field(struct bpf_tcp_sock, FIELD)); \
6288                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                 \
6289                                         struct inet_connection_sock,    \
6290                                         FIELD),                         \
6291                                       si->dst_reg, si->src_reg,         \
6292                                       offsetof(                         \
6293                                         struct inet_connection_sock,    \
6294                                         FIELD));                        \
6295         } while (0)
6296
6297         if (insn > insn_buf)
6298                 return insn - insn_buf;
6299
6300         switch (si->off) {
6301         case offsetof(struct bpf_tcp_sock, rtt_min):
6302                 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
6303                              sizeof(struct minmax));
6304                 BUILD_BUG_ON(sizeof(struct minmax) <
6305                              sizeof(struct minmax_sample));
6306
6307                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
6308                                       offsetof(struct tcp_sock, rtt_min) +
6309                                       offsetof(struct minmax_sample, v));
6310                 break;
6311         case offsetof(struct bpf_tcp_sock, snd_cwnd):
6312                 BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
6313                 break;
6314         case offsetof(struct bpf_tcp_sock, srtt_us):
6315                 BPF_TCP_SOCK_GET_COMMON(srtt_us);
6316                 break;
6317         case offsetof(struct bpf_tcp_sock, snd_ssthresh):
6318                 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
6319                 break;
6320         case offsetof(struct bpf_tcp_sock, rcv_nxt):
6321                 BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
6322                 break;
6323         case offsetof(struct bpf_tcp_sock, snd_nxt):
6324                 BPF_TCP_SOCK_GET_COMMON(snd_nxt);
6325                 break;
6326         case offsetof(struct bpf_tcp_sock, snd_una):
6327                 BPF_TCP_SOCK_GET_COMMON(snd_una);
6328                 break;
6329         case offsetof(struct bpf_tcp_sock, mss_cache):
6330                 BPF_TCP_SOCK_GET_COMMON(mss_cache);
6331                 break;
6332         case offsetof(struct bpf_tcp_sock, ecn_flags):
6333                 BPF_TCP_SOCK_GET_COMMON(ecn_flags);
6334                 break;
6335         case offsetof(struct bpf_tcp_sock, rate_delivered):
6336                 BPF_TCP_SOCK_GET_COMMON(rate_delivered);
6337                 break;
6338         case offsetof(struct bpf_tcp_sock, rate_interval_us):
6339                 BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
6340                 break;
6341         case offsetof(struct bpf_tcp_sock, packets_out):
6342                 BPF_TCP_SOCK_GET_COMMON(packets_out);
6343                 break;
6344         case offsetof(struct bpf_tcp_sock, retrans_out):
6345                 BPF_TCP_SOCK_GET_COMMON(retrans_out);
6346                 break;
6347         case offsetof(struct bpf_tcp_sock, total_retrans):
6348                 BPF_TCP_SOCK_GET_COMMON(total_retrans);
6349                 break;
6350         case offsetof(struct bpf_tcp_sock, segs_in):
6351                 BPF_TCP_SOCK_GET_COMMON(segs_in);
6352                 break;
6353         case offsetof(struct bpf_tcp_sock, data_segs_in):
6354                 BPF_TCP_SOCK_GET_COMMON(data_segs_in);
6355                 break;
6356         case offsetof(struct bpf_tcp_sock, segs_out):
6357                 BPF_TCP_SOCK_GET_COMMON(segs_out);
6358                 break;
6359         case offsetof(struct bpf_tcp_sock, data_segs_out):
6360                 BPF_TCP_SOCK_GET_COMMON(data_segs_out);
6361                 break;
6362         case offsetof(struct bpf_tcp_sock, lost_out):
6363                 BPF_TCP_SOCK_GET_COMMON(lost_out);
6364                 break;
6365         case offsetof(struct bpf_tcp_sock, sacked_out):
6366                 BPF_TCP_SOCK_GET_COMMON(sacked_out);
6367                 break;
6368         case offsetof(struct bpf_tcp_sock, bytes_received):
6369                 BPF_TCP_SOCK_GET_COMMON(bytes_received);
6370                 break;
6371         case offsetof(struct bpf_tcp_sock, bytes_acked):
6372                 BPF_TCP_SOCK_GET_COMMON(bytes_acked);
6373                 break;
6374         case offsetof(struct bpf_tcp_sock, dsack_dups):
6375                 BPF_TCP_SOCK_GET_COMMON(dsack_dups);
6376                 break;
6377         case offsetof(struct bpf_tcp_sock, delivered):
6378                 BPF_TCP_SOCK_GET_COMMON(delivered);
6379                 break;
6380         case offsetof(struct bpf_tcp_sock, delivered_ce):
6381                 BPF_TCP_SOCK_GET_COMMON(delivered_ce);
6382                 break;
6383         case offsetof(struct bpf_tcp_sock, icsk_retransmits):
6384                 BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
6385                 break;
6386         }
6387
6388         return insn - insn_buf;
6389 }
6390
6391 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
6392 {
6393         if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
6394                 return (unsigned long)sk;
6395
6396         return (unsigned long)NULL;
6397 }
6398
6399 const struct bpf_func_proto bpf_tcp_sock_proto = {
6400         .func           = bpf_tcp_sock,
6401         .gpl_only       = false,
6402         .ret_type       = RET_PTR_TO_TCP_SOCK_OR_NULL,
6403         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
6404 };
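/*
 * Editor's sketch (not part of this file): reading struct bpf_tcp_sock
 * under the rules encoded by bpf_tcp_sock_is_valid_access() above --
 * naturally aligned loads, 4 bytes wide except for the two u64 fields.
 * Assumes libbpf's <bpf/bpf_helpers.h> and a cgroup skb attach point.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int log_cwnd(struct __sk_buff *skb)
{
        struct bpf_sock *sk = skb->sk;
        struct bpf_tcp_sock *tp;

        if (!sk)
                return 1;
        sk = bpf_sk_fullsock(sk);       /* get a full socket first */
        if (!sk)
                return 1;
        tp = bpf_tcp_sock(sk);
        if (!tp)
                return 1;

        /* snd_cwnd is a 4-byte read; bytes_acked is one of the 8-byte ones. */
        bpf_printk("cwnd=%u bytes_acked=%llu", tp->snd_cwnd, tp->bytes_acked);
        return 1;       /* 1 == allow for cgroup skb programs */
}

char _license[] SEC("license") = "GPL";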
6405
6406 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
6407 {
6408         sk = sk_to_full_sk(sk);
6409
6410         if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
6411                 return (unsigned long)sk;
6412
6413         return (unsigned long)NULL;
6414 }
6415
6416 static const struct bpf_func_proto bpf_get_listener_sock_proto = {
6417         .func           = bpf_get_listener_sock,
6418         .gpl_only       = false,
6419         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
6420         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
6421 };
6422
6423 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
6424 {
6425         unsigned int iphdr_len;
6426
6427         switch (skb_protocol(skb, true)) {
6428         case cpu_to_be16(ETH_P_IP):
6429                 iphdr_len = sizeof(struct iphdr);
6430                 break;
6431         case cpu_to_be16(ETH_P_IPV6):
6432                 iphdr_len = sizeof(struct ipv6hdr);
6433                 break;
6434         default:
6435                 return 0;
6436         }
6437
6438         if (skb_headlen(skb) < iphdr_len)
6439                 return 0;
6440
6441         if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
6442                 return 0;
6443
6444         return INET_ECN_set_ce(skb);
6445 }
6446
6447 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6448                                   struct bpf_insn_access_aux *info)
6449 {
6450         if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
6451                 return false;
6452
6453         if (off % size != 0)
6454                 return false;
6455
6456         switch (off) {
6457         default:
6458                 return size == sizeof(__u32);
6459         }
6460 }
6461
6462 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
6463                                     const struct bpf_insn *si,
6464                                     struct bpf_insn *insn_buf,
6465                                     struct bpf_prog *prog, u32 *target_size)
6466 {
6467         struct bpf_insn *insn = insn_buf;
6468
6469 #define BPF_XDP_SOCK_GET(FIELD)                                         \
6470         do {                                                            \
6471                 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) >     \
6472                              sizeof_field(struct bpf_xdp_sock, FIELD)); \
6473                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
6474                                       si->dst_reg, si->src_reg,         \
6475                                       offsetof(struct xdp_sock, FIELD)); \
6476         } while (0)
6477
6478         switch (si->off) {
6479         case offsetof(struct bpf_xdp_sock, queue_id):
6480                 BPF_XDP_SOCK_GET(queue_id);
6481                 break;
6482         }
6483
6484         return insn - insn_buf;
6485 }
6486
6487 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
6488         .func           = bpf_skb_ecn_set_ce,
6489         .gpl_only       = false,
6490         .ret_type       = RET_INTEGER,
6491         .arg1_type      = ARG_PTR_TO_CTX,
6492 };
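/*
 * Editor's sketch (not part of this file): a cgroup egress program using
 * bpf_skb_ecn_set_ce() above. The size-based trigger is a stand-in for a
 * real congestion policy; the helper is a no-op on non-ECT packets.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int mark_ce(struct __sk_buff *skb)
{
        if (skb->len > 1400)    /* hypothetical policy threshold */
                bpf_skb_ecn_set_ce(skb);
        return 1;       /* always allow */
}

char _license[] SEC("license") = "GPL";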
6493
6494 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6495            struct tcphdr *, th, u32, th_len)
6496 {
6497 #ifdef CONFIG_SYN_COOKIES
6498         u32 cookie;
6499         int ret;
6500
6501         if (unlikely(!sk || th_len < sizeof(*th)))
6502                 return -EINVAL;
6503
6504         /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
6505         if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6506                 return -EINVAL;
6507
6508         if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6509                 return -EINVAL;
6510
6511         if (!th->ack || th->rst || th->syn)
6512                 return -ENOENT;
6513
6514         if (tcp_synq_no_recent_overflow(sk))
6515                 return -ENOENT;
6516
6517         cookie = ntohl(th->ack_seq) - 1;
6518
6519         switch (sk->sk_family) {
6520         case AF_INET:
6521                 if (unlikely(iph_len < sizeof(struct iphdr)))
6522                         return -EINVAL;
6523
6524                 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
6525                 break;
6526
6527 #if IS_BUILTIN(CONFIG_IPV6)
6528         case AF_INET6:
6529                 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6530                         return -EINVAL;
6531
6532                 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
6533                 break;
6534 #endif /* CONFIG_IPV6 */
6535
6536         default:
6537                 return -EPROTONOSUPPORT;
6538         }
6539
6540         if (ret > 0)
6541                 return 0;
6542
6543         return -ENOENT;
6544 #else
6545         return -ENOTSUPP;
6546 #endif
6547 }
6548
6549 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
6550         .func           = bpf_tcp_check_syncookie,
6551         .gpl_only       = true,
6552         .pkt_access     = true,
6553         .ret_type       = RET_INTEGER,
6554         .arg1_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6555         .arg2_type      = ARG_PTR_TO_MEM,
6556         .arg3_type      = ARG_CONST_SIZE,
6557         .arg4_type      = ARG_PTR_TO_MEM,
6558         .arg5_type      = ARG_CONST_SIZE,
6559 };
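/*
 * Editor's sketch (not part of this file): the intended XDP flow for
 * bpf_tcp_check_syncookie() -- find the listener with bpf_skc_lookup_tcp(),
 * then test whether the ACK carries a valid syncookie. IPv4 only and
 * without TCP option parsing, for brevity; the drop-on-invalid policy is
 * illustrative.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int check_cookie(struct xdp_md *ctx)
{
        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;
        struct ethhdr *eth = data;
        struct iphdr *iph = data + sizeof(*eth);
        struct tcphdr *th = (void *)iph + sizeof(*iph);
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
        long ret;

        if ((void *)(th + 1) > data_end)
                return XDP_PASS;
        if (eth->h_proto != bpf_htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
                return XDP_PASS;

        tuple.ipv4.saddr = iph->saddr;
        tuple.ipv4.daddr = iph->daddr;
        tuple.ipv4.sport = th->source;
        tuple.ipv4.dport = th->dest;

        sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
                                BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return XDP_PASS;
        ret = bpf_tcp_check_syncookie(sk, iph, sizeof(*iph), th, sizeof(*th));
        bpf_sk_release(sk);
        return ret == 0 ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";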
6560
6561 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6562            struct tcphdr *, th, u32, th_len)
6563 {
6564 #ifdef CONFIG_SYN_COOKIES
6565         u32 cookie;
6566         u16 mss;
6567
6568         if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
6569                 return -EINVAL;
6570
6571         if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6572                 return -EINVAL;
6573
6574         if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6575                 return -ENOENT;
6576
6577         if (!th->syn || th->ack || th->fin || th->rst)
6578                 return -EINVAL;
6579
6580         if (unlikely(iph_len < sizeof(struct iphdr)))
6581                 return -EINVAL;
6582
6583         /* Both struct iphdr and struct ipv6hdr have the version field at the
6584          * same offset, so we can cast to the shorter header (struct iphdr).
6585          */
6586         switch (((struct iphdr *)iph)->version) {
6587         case 4:
6588                 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
6589                         return -EINVAL;
6590
6591                 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
6592                 break;
6593
6594 #if IS_BUILTIN(CONFIG_IPV6)
6595         case 6:
6596                 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6597                         return -EINVAL;
6598
6599                 if (sk->sk_family != AF_INET6)
6600                         return -EINVAL;
6601
6602                 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
6603                 break;
6604 #endif /* CONFIG_IPV6 */
6605
6606         default:
6607                 return -EPROTONOSUPPORT;
6608         }
6609         if (mss == 0)
6610                 return -ENOENT;
6611
6612         return cookie | ((u64)mss << 32);
6613 #else
6614         return -EOPNOTSUPP;
6615 #endif /* CONFIG_SYN_COOKIES */
6616 }
6617
6618 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
6619         .func           = bpf_tcp_gen_syncookie,
6620         .gpl_only       = true, /* __cookie_v*_init_sequence() is GPL */
6621         .pkt_access     = true,
6622         .ret_type       = RET_INTEGER,
6623         .arg1_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6624         .arg2_type      = ARG_PTR_TO_MEM,
6625         .arg3_type      = ARG_CONST_SIZE,
6626         .arg4_type      = ARG_PTR_TO_MEM,
6627         .arg5_type      = ARG_CONST_SIZE,
6628 };
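/*
 * Editor's sketch (not part of this file): unpacking the value returned
 * above, which packs "cookie | ((u64)mss << 32)" on success and a
 * negative errno otherwise.
 */
#include <linux/types.h>

static inline int unpack_syncookie(__s64 val, __u32 *cookie, __u16 *mss)
{
        if (val < 0)
                return (int)val;        /* -EINVAL, -ENOENT, -EOPNOTSUPP, ... */
        *cookie = (__u32)val;           /* low 32 bits: the ISN cookie */
        *mss = (__u16)(val >> 32);      /* high 32 bits: the encoded MSS */
        return 0;
}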
6629
6630 BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
6631 {
6632         if (!sk || flags != 0)
6633                 return -EINVAL;
6634         if (!skb_at_tc_ingress(skb))
6635                 return -EOPNOTSUPP;
6636         if (unlikely(dev_net(skb->dev) != sock_net(sk)))
6637                 return -ENETUNREACH;
6638         if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
6639                 return -ESOCKTNOSUPPORT;
6640         if (sk_is_refcounted(sk) &&
6641             unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
6642                 return -ENOENT;
6643
6644         skb_orphan(skb);
6645         skb->sk = sk;
6646         skb->destructor = sock_pfree;
6647
6648         return 0;
6649 }
6650
6651 static const struct bpf_func_proto bpf_sk_assign_proto = {
6652         .func           = bpf_sk_assign,
6653         .gpl_only       = false,
6654         .ret_type       = RET_INTEGER,
6655         .arg1_type      = ARG_PTR_TO_CTX,
6656         .arg2_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6657         .arg3_type      = ARG_ANYTHING,
6658 };
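/*
 * Editor's sketch (not part of this file): the tc-ingress, TPROXY-style
 * steering that bpf_sk_assign() above enables -- look a socket up, pin the
 * skb to it, drop our own reference. IPv4/TCP only; the listener address
 * and port are made up. Assumes libbpf's <bpf/bpf_helpers.h>.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int steer(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {
                .ipv4.daddr = bpf_htonl(0x7f000001),    /* 127.0.0.1 */
                .ipv4.dport = bpf_htons(4040),
        };
        struct bpf_sock *sk;
        long err;

        sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return TC_ACT_OK;
        /* bpf_sk_assign() takes its own reference; it rejects reuseport
         * sockets and skbs from a different netns, as the code above shows.
         */
        err = bpf_sk_assign(skb, sk, 0);
        bpf_sk_release(sk);
        return err ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";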
6659
6660 static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
6661                                     u8 search_kind, const u8 *magic,
6662                                     u8 magic_len, bool *eol)
6663 {
6664         u8 kind, kind_len;
6665
6666         *eol = false;
6667
6668         while (op < opend) {
6669                 kind = op[0];
6670
6671                 if (kind == TCPOPT_EOL) {
6672                         *eol = true;
6673                         return ERR_PTR(-ENOMSG);
6674                 } else if (kind == TCPOPT_NOP) {
6675                         op++;
6676                         continue;
6677                 }
6678
6679                 if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
6680                         /* Something is wrong in the received header.
6681                          * Follow the TCP stack's tcp_parse_options()
6682                          * and just bail here.
6683                          */
6684                         return ERR_PTR(-EFAULT);
6685
6686                 kind_len = op[1];
6687                 if (search_kind == kind) {
6688                         if (!magic_len)
6689                                 return op;
6690
6691                         if (magic_len > kind_len - 2)
6692                                 return ERR_PTR(-ENOMSG);
6693
6694                         if (!memcmp(&op[2], magic, magic_len))
6695                                 return op;
6696                 }
6697
6698                 op += kind_len;
6699         }
6700
6701         return ERR_PTR(-ENOMSG);
6702 }
6703
6704 BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6705            void *, search_res, u32, len, u64, flags)
6706 {
6707         bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
6708         const u8 *op, *opend, *magic, *search = search_res;
6709         u8 search_kind, search_len, copy_len, magic_len;
6710         int ret;
6711
6712         /* 2 bytes is the minimal option length, except for TCPOPT_NOP and
6713          * TCPOPT_EOL, which are useless for the bpf prog to learn;
6714          * this helper disallows loading them as well.
6715          */
6716         if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
6717                 return -EINVAL;
6718
6719         search_kind = search[0];
6720         search_len = search[1];
6721
6722         if (search_len > len || search_kind == TCPOPT_NOP ||
6723             search_kind == TCPOPT_EOL)
6724                 return -EINVAL;
6725
6726         if (search_kind == TCPOPT_EXP || search_kind == 253) {
6727                 /* 16- or 32-bit magic.  +2 for kind and kind length */
6728                 if (search_len != 4 && search_len != 6)
6729                         return -EINVAL;
6730                 magic = &search[2];
6731                 magic_len = search_len - 2;
6732         } else {
6733                 if (search_len)
6734                         return -EINVAL;
6735                 magic = NULL;
6736                 magic_len = 0;
6737         }
6738
6739         if (load_syn) {
6740                 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
6741                 if (ret < 0)
6742                         return ret;
6743
6744                 opend = op + ret;
6745                 op += sizeof(struct tcphdr);
6746         } else {
6747                 if (!bpf_sock->skb ||
6748                     bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
6749                     /* A prog at this bpf_sock->op cannot call this helper. */
6750                         return -EPERM;
6751
6752                 opend = bpf_sock->skb_data_end;
6753                 op = bpf_sock->skb->data + sizeof(struct tcphdr);
6754         }
6755
6756         op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
6757                                 &eol);
6758         if (IS_ERR(op))
6759                 return PTR_ERR(op);
6760
6761         copy_len = op[1];
6762         ret = copy_len;
6763         if (copy_len > len) {
6764                 ret = -ENOSPC;
6765                 copy_len = len;
6766         }
6767
6768         memcpy(search_res, op, copy_len);
6769         return ret;
6770 }
6771
6772 static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
6773         .func           = bpf_sock_ops_load_hdr_opt,
6774         .gpl_only       = false,
6775         .ret_type       = RET_INTEGER,
6776         .arg1_type      = ARG_PTR_TO_CTX,
6777         .arg2_type      = ARG_PTR_TO_MEM,
6778         .arg3_type      = ARG_CONST_SIZE,
6779         .arg4_type      = ARG_ANYTHING,
6780 };
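/*
 * Editor's sketch (not part of this file): calling the helper above from a
 * sockops program. search_res seeds the search (kind 254 plus a made-up
 * 2-byte 0xeB9F magic) and is overwritten with the whole option on
 * success. Getting BPF_SOCK_OPS_PARSE_HDR_OPT_CB at all requires the
 * matching cb flag to have been set via bpf_sock_ops_cb_flags_set().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int load_opt(struct bpf_sock_ops *skops)
{
        __u8 opt[8] = { 254, 4, 0xeb, 0x9f };   /* kind, search_len, magic */
        int ret;

        if (skops->op != BPF_SOCK_OPS_PARSE_HDR_OPT_CB)
                return 1;

        ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
        if (ret > 0)    /* ret is the full option length now held in opt[] */
                bpf_printk("found option, len=%d", ret);
        return 1;
}

char _license[] SEC("license") = "GPL";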
6781
6782 BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6783            const void *, from, u32, len, u64, flags)
6784 {
6785         u8 new_kind, new_kind_len, magic_len = 0, *opend;
6786         const u8 *op, *new_op, *magic = NULL;
6787         struct sk_buff *skb;
6788         bool eol;
6789
6790         if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
6791                 return -EPERM;
6792
6793         if (len < 2 || flags)
6794                 return -EINVAL;
6795
6796         new_op = from;
6797         new_kind = new_op[0];
6798         new_kind_len = new_op[1];
6799
6800         if (new_kind_len > len || new_kind == TCPOPT_NOP ||
6801             new_kind == TCPOPT_EOL)
6802                 return -EINVAL;
6803
6804         if (new_kind_len > bpf_sock->remaining_opt_len)
6805                 return -ENOSPC;
6806
6807         /* 253 is another experimental kind */
6808         if (new_kind == TCPOPT_EXP || new_kind == 253)  {
6809                 if (new_kind_len < 4)
6810                         return -EINVAL;
6811                 /* Match on the 2-byte magic as well.
6812                  * RFC 6994: the magic could be 2 or 4 bytes.
6813                  * Hence, matching on only 2 bytes is on the
6814                  * conservative side, but it is the right
6815                  * thing to do for the 'search-for-duplication'
6816                  * purpose.
6817                  */
6818                 magic = &new_op[2];
6819                 magic_len = 2;
6820         }
6821
6822         /* Check for duplication */
6823         skb = bpf_sock->skb;
6824         op = skb->data + sizeof(struct tcphdr);
6825         opend = bpf_sock->skb_data_end;
6826
6827         op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
6828                                 &eol);
6829         if (!IS_ERR(op))
6830                 return -EEXIST;
6831
6832         if (PTR_ERR(op) != -ENOMSG)
6833                 return PTR_ERR(op);
6834
6835         if (eol)
6836                 /* The options list has already been ended (TCPOPT_EOL).
6837                  * Treat it as if no more header options can be written.
6838                  */
6839                 return -ENOSPC;
6840
6841         /* No duplication found.  Store the header option. */
6842         memcpy(opend, from, new_kind_len);
6843
6844         bpf_sock->remaining_opt_len -= new_kind_len;
6845         bpf_sock->skb_data_end += new_kind_len;
6846
6847         return 0;
6848 }
6849
6850 static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
6851         .func           = bpf_sock_ops_store_hdr_opt,
6852         .gpl_only       = false,
6853         .ret_type       = RET_INTEGER,
6854         .arg1_type      = ARG_PTR_TO_CTX,
6855         .arg2_type      = ARG_PTR_TO_MEM,
6856         .arg3_type      = ARG_CONST_SIZE,
6857         .arg4_type      = ARG_ANYTHING,
6858 };
6859
6860 BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6861            u32, len, u64, flags)
6862 {
6863         if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
6864                 return -EPERM;
6865
6866         if (flags || len < 2)
6867                 return -EINVAL;
6868
6869         if (len > bpf_sock->remaining_opt_len)
6870                 return -ENOSPC;
6871
6872         bpf_sock->remaining_opt_len -= len;
6873
6874         return 0;
6875 }
6876
6877 static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
6878         .func           = bpf_sock_ops_reserve_hdr_opt,
6879         .gpl_only       = false,
6880         .ret_type       = RET_INTEGER,
6881         .arg1_type      = ARG_PTR_TO_CTX,
6882         .arg2_type      = ARG_ANYTHING,
6883         .arg3_type      = ARG_ANYTHING,
6884 };
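/*
 * Editor's sketch (not part of this file): the reserve/store pairing the
 * two helpers above require -- space is claimed in
 * BPF_SOCK_OPS_HDR_OPT_LEN_CB, and the bytes are written later in
 * BPF_SOCK_OPS_WRITE_HDR_OPT_CB. Kind 254 with a made-up 0xeB9F magic;
 * both callbacks assume the corresponding cb flags were enabled via
 * bpf_sock_ops_cb_flags_set().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int write_opt(struct bpf_sock_ops *skops)
{
        __u8 opt[4] = { 254, 4, 0xeb, 0x9f };   /* kind, len, 2-byte magic */

        switch (skops->op) {
        case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
                bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
                break;
        case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
                bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
                break;
        }
        return 1;
}

char _license[] SEC("license") = "GPL";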
6885
6886 #endif /* CONFIG_INET */
6887
6888 bool bpf_helper_changes_pkt_data(void *func)
6889 {
6890         if (func == bpf_skb_vlan_push ||
6891             func == bpf_skb_vlan_pop ||
6892             func == bpf_skb_store_bytes ||
6893             func == bpf_skb_change_proto ||
6894             func == bpf_skb_change_head ||
6895             func == sk_skb_change_head ||
6896             func == bpf_skb_change_tail ||
6897             func == sk_skb_change_tail ||
6898             func == bpf_skb_adjust_room ||
6899             func == sk_skb_adjust_room ||
6900             func == bpf_skb_pull_data ||
6901             func == sk_skb_pull_data ||
6902             func == bpf_clone_redirect ||
6903             func == bpf_l3_csum_replace ||
6904             func == bpf_l4_csum_replace ||
6905             func == bpf_xdp_adjust_head ||
6906             func == bpf_xdp_adjust_meta ||
6907             func == bpf_msg_pull_data ||
6908             func == bpf_msg_push_data ||
6909             func == bpf_msg_pop_data ||
6910             func == bpf_xdp_adjust_tail ||
6911 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
6912             func == bpf_lwt_seg6_store_bytes ||
6913             func == bpf_lwt_seg6_adjust_srh ||
6914             func == bpf_lwt_seg6_action ||
6915 #endif
6916 #ifdef CONFIG_INET
6917             func == bpf_sock_ops_store_hdr_opt ||
6918 #endif
6919             func == bpf_lwt_in_push_encap ||
6920             func == bpf_lwt_xmit_push_encap)
6921                 return true;
6922
6923         return false;
6924 }
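/*
 * Editor's sketch (not part of this file) of what this list means for BPF
 * authors: after calling any helper named here, previously loaded packet
 * pointers are invalidated by the verifier and must be re-derived.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pull_then_read(struct __sk_buff *skb)
{
        void *data, *data_end;

        if (bpf_skb_pull_data(skb, 64))  /* changes packet data */
                return TC_ACT_OK;

        /* Pointers must be (re)loaded after the call, or the verifier
         * rejects the access below.
         */
        data = (void *)(long)skb->data;
        data_end = (void *)(long)skb->data_end;
        if (data + 64 > data_end)
                return TC_ACT_OK;

        if (((__u8 *)data)[0] == 0x45)  /* e.g. IPv4 with ihl == 5 */
                bpf_printk("ipv4");
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";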
6925
6926 const struct bpf_func_proto bpf_event_output_data_proto __weak;
6927 const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak;
6928
6929 static const struct bpf_func_proto *
6930 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6931 {
6932         switch (func_id) {
6933         /* inet and inet6 sockets are created in a process
6934          * context, so there is always a valid uid/gid.
6935          */
6936         case BPF_FUNC_get_current_uid_gid:
6937                 return &bpf_get_current_uid_gid_proto;
6938         case BPF_FUNC_get_local_storage:
6939                 return &bpf_get_local_storage_proto;
6940         case BPF_FUNC_get_socket_cookie:
6941                 return &bpf_get_socket_cookie_sock_proto;
6942         case BPF_FUNC_get_netns_cookie:
6943                 return &bpf_get_netns_cookie_sock_proto;
6944         case BPF_FUNC_perf_event_output:
6945                 return &bpf_event_output_data_proto;
6946         case BPF_FUNC_get_current_pid_tgid:
6947                 return &bpf_get_current_pid_tgid_proto;
6948         case BPF_FUNC_get_current_comm:
6949                 return &bpf_get_current_comm_proto;
6950 #ifdef CONFIG_CGROUPS
6951         case BPF_FUNC_get_current_cgroup_id:
6952                 return &bpf_get_current_cgroup_id_proto;
6953         case BPF_FUNC_get_current_ancestor_cgroup_id:
6954                 return &bpf_get_current_ancestor_cgroup_id_proto;
6955 #endif
6956 #ifdef CONFIG_CGROUP_NET_CLASSID
6957         case BPF_FUNC_get_cgroup_classid:
6958                 return &bpf_get_cgroup_classid_curr_proto;
6959 #endif
6960         case BPF_FUNC_sk_storage_get:
6961                 return &bpf_sk_storage_get_cg_sock_proto;
6962         default:
6963                 return bpf_base_func_proto(func_id);
6964         }
6965 }
6966
6967 static const struct bpf_func_proto *
6968 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6969 {
6970         switch (func_id) {
6971         /* inet and inet6 sockets are created in a process
6972          * context, so there is always a valid uid/gid.
6973          */
6974         case BPF_FUNC_get_current_uid_gid:
6975                 return &bpf_get_current_uid_gid_proto;
6976         case BPF_FUNC_bind:
6977                 switch (prog->expected_attach_type) {
6978                 case BPF_CGROUP_INET4_CONNECT:
6979                 case BPF_CGROUP_INET6_CONNECT:
6980                         return &bpf_bind_proto;
6981                 default:
6982                         return NULL;
6983                 }
6984         case BPF_FUNC_get_socket_cookie:
6985                 return &bpf_get_socket_cookie_sock_addr_proto;
6986         case BPF_FUNC_get_netns_cookie:
6987                 return &bpf_get_netns_cookie_sock_addr_proto;
6988         case BPF_FUNC_get_local_storage:
6989                 return &bpf_get_local_storage_proto;
6990         case BPF_FUNC_perf_event_output:
6991                 return &bpf_event_output_data_proto;
6992         case BPF_FUNC_get_current_pid_tgid:
6993                 return &bpf_get_current_pid_tgid_proto;
6994         case BPF_FUNC_get_current_comm:
6995                 return &bpf_get_current_comm_proto;
6996 #ifdef CONFIG_CGROUPS
6997         case BPF_FUNC_get_current_cgroup_id:
6998                 return &bpf_get_current_cgroup_id_proto;
6999         case BPF_FUNC_get_current_ancestor_cgroup_id:
7000                 return &bpf_get_current_ancestor_cgroup_id_proto;
7001 #endif
7002 #ifdef CONFIG_CGROUP_NET_CLASSID
7003         case BPF_FUNC_get_cgroup_classid:
7004                 return &bpf_get_cgroup_classid_curr_proto;
7005 #endif
7006 #ifdef CONFIG_INET
7007         case BPF_FUNC_sk_lookup_tcp:
7008                 return &bpf_sock_addr_sk_lookup_tcp_proto;
7009         case BPF_FUNC_sk_lookup_udp:
7010                 return &bpf_sock_addr_sk_lookup_udp_proto;
7011         case BPF_FUNC_sk_release:
7012                 return &bpf_sk_release_proto;
7013         case BPF_FUNC_skc_lookup_tcp:
7014                 return &bpf_sock_addr_skc_lookup_tcp_proto;
7015 #endif /* CONFIG_INET */
7016         case BPF_FUNC_sk_storage_get:
7017                 return &bpf_sk_storage_get_proto;
7018         case BPF_FUNC_sk_storage_delete:
7019                 return &bpf_sk_storage_delete_proto;
7020         case BPF_FUNC_setsockopt:
7021                 switch (prog->expected_attach_type) {
7022                 case BPF_CGROUP_INET4_BIND:
7023                 case BPF_CGROUP_INET6_BIND:
7024                 case BPF_CGROUP_INET4_CONNECT:
7025                 case BPF_CGROUP_INET6_CONNECT:
7026                         return &bpf_sock_addr_setsockopt_proto;
7027                 default:
7028                         return NULL;
7029                 }
7030         case BPF_FUNC_getsockopt:
7031                 switch (prog->expected_attach_type) {
7032                 case BPF_CGROUP_INET4_BIND:
7033                 case BPF_CGROUP_INET6_BIND:
7034                 case BPF_CGROUP_INET4_CONNECT:
7035                 case BPF_CGROUP_INET6_CONNECT:
7036                         return &bpf_sock_addr_getsockopt_proto;
7037                 default:
7038                         return NULL;
7039                 }
7040         default:
7041                 return bpf_sk_base_func_proto(func_id);
7042         }
7043 }
7044
7045 static const struct bpf_func_proto *
7046 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7047 {
7048         switch (func_id) {
7049         case BPF_FUNC_skb_load_bytes:
7050                 return &bpf_skb_load_bytes_proto;
7051         case BPF_FUNC_skb_load_bytes_relative:
7052                 return &bpf_skb_load_bytes_relative_proto;
7053         case BPF_FUNC_get_socket_cookie:
7054                 return &bpf_get_socket_cookie_proto;
7055         case BPF_FUNC_get_socket_uid:
7056                 return &bpf_get_socket_uid_proto;
7057         case BPF_FUNC_perf_event_output:
7058                 return &bpf_skb_event_output_proto;
7059         default:
7060                 return bpf_sk_base_func_proto(func_id);
7061         }
7062 }
7063
7064 const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
7065 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
7066
7067 static const struct bpf_func_proto *
7068 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7069 {
7070         switch (func_id) {
7071         case BPF_FUNC_get_local_storage:
7072                 return &bpf_get_local_storage_proto;
7073         case BPF_FUNC_sk_fullsock:
7074                 return &bpf_sk_fullsock_proto;
7075         case BPF_FUNC_sk_storage_get:
7076                 return &bpf_sk_storage_get_proto;
7077         case BPF_FUNC_sk_storage_delete:
7078                 return &bpf_sk_storage_delete_proto;
7079         case BPF_FUNC_perf_event_output:
7080                 return &bpf_skb_event_output_proto;
7081 #ifdef CONFIG_SOCK_CGROUP_DATA
7082         case BPF_FUNC_skb_cgroup_id:
7083                 return &bpf_skb_cgroup_id_proto;
7084         case BPF_FUNC_skb_ancestor_cgroup_id:
7085                 return &bpf_skb_ancestor_cgroup_id_proto;
7086         case BPF_FUNC_sk_cgroup_id:
7087                 return &bpf_sk_cgroup_id_proto;
7088         case BPF_FUNC_sk_ancestor_cgroup_id:
7089                 return &bpf_sk_ancestor_cgroup_id_proto;
7090 #endif
7091 #ifdef CONFIG_INET
7092         case BPF_FUNC_sk_lookup_tcp:
7093                 return &bpf_sk_lookup_tcp_proto;
7094         case BPF_FUNC_sk_lookup_udp:
7095                 return &bpf_sk_lookup_udp_proto;
7096         case BPF_FUNC_sk_release:
7097                 return &bpf_sk_release_proto;
7098         case BPF_FUNC_skc_lookup_tcp:
7099                 return &bpf_skc_lookup_tcp_proto;
7100         case BPF_FUNC_tcp_sock:
7101                 return &bpf_tcp_sock_proto;
7102         case BPF_FUNC_get_listener_sock:
7103                 return &bpf_get_listener_sock_proto;
7104         case BPF_FUNC_skb_ecn_set_ce:
7105                 return &bpf_skb_ecn_set_ce_proto;
7106 #endif
7107         default:
7108                 return sk_filter_func_proto(func_id, prog);
7109         }
7110 }
7111
7112 static const struct bpf_func_proto *
7113 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7114 {
7115         switch (func_id) {
7116         case BPF_FUNC_skb_store_bytes:
7117                 return &bpf_skb_store_bytes_proto;
7118         case BPF_FUNC_skb_load_bytes:
7119                 return &bpf_skb_load_bytes_proto;
7120         case BPF_FUNC_skb_load_bytes_relative:
7121                 return &bpf_skb_load_bytes_relative_proto;
7122         case BPF_FUNC_skb_pull_data:
7123                 return &bpf_skb_pull_data_proto;
7124         case BPF_FUNC_csum_diff:
7125                 return &bpf_csum_diff_proto;
7126         case BPF_FUNC_csum_update:
7127                 return &bpf_csum_update_proto;
7128         case BPF_FUNC_csum_level:
7129                 return &bpf_csum_level_proto;
7130         case BPF_FUNC_l3_csum_replace:
7131                 return &bpf_l3_csum_replace_proto;
7132         case BPF_FUNC_l4_csum_replace:
7133                 return &bpf_l4_csum_replace_proto;
7134         case BPF_FUNC_clone_redirect:
7135                 return &bpf_clone_redirect_proto;
7136         case BPF_FUNC_get_cgroup_classid:
7137                 return &bpf_get_cgroup_classid_proto;
7138         case BPF_FUNC_skb_vlan_push:
7139                 return &bpf_skb_vlan_push_proto;
7140         case BPF_FUNC_skb_vlan_pop:
7141                 return &bpf_skb_vlan_pop_proto;
7142         case BPF_FUNC_skb_change_proto:
7143                 return &bpf_skb_change_proto_proto;
7144         case BPF_FUNC_skb_change_type:
7145                 return &bpf_skb_change_type_proto;
7146         case BPF_FUNC_skb_adjust_room:
7147                 return &bpf_skb_adjust_room_proto;
7148         case BPF_FUNC_skb_change_tail:
7149                 return &bpf_skb_change_tail_proto;
7150         case BPF_FUNC_skb_change_head:
7151                 return &bpf_skb_change_head_proto;
7152         case BPF_FUNC_skb_get_tunnel_key:
7153                 return &bpf_skb_get_tunnel_key_proto;
7154         case BPF_FUNC_skb_set_tunnel_key:
7155                 return bpf_get_skb_set_tunnel_proto(func_id);
7156         case BPF_FUNC_skb_get_tunnel_opt:
7157                 return &bpf_skb_get_tunnel_opt_proto;
7158         case BPF_FUNC_skb_set_tunnel_opt:
7159                 return bpf_get_skb_set_tunnel_proto(func_id);
7160         case BPF_FUNC_redirect:
7161                 return &bpf_redirect_proto;
7162         case BPF_FUNC_redirect_neigh:
7163                 return &bpf_redirect_neigh_proto;
7164         case BPF_FUNC_redirect_peer:
7165                 return &bpf_redirect_peer_proto;
7166         case BPF_FUNC_get_route_realm:
7167                 return &bpf_get_route_realm_proto;
7168         case BPF_FUNC_get_hash_recalc:
7169                 return &bpf_get_hash_recalc_proto;
7170         case BPF_FUNC_set_hash_invalid:
7171                 return &bpf_set_hash_invalid_proto;
7172         case BPF_FUNC_set_hash:
7173                 return &bpf_set_hash_proto;
7174         case BPF_FUNC_perf_event_output:
7175                 return &bpf_skb_event_output_proto;
7176         case BPF_FUNC_get_smp_processor_id:
7177                 return &bpf_get_smp_processor_id_proto;
7178         case BPF_FUNC_skb_under_cgroup:
7179                 return &bpf_skb_under_cgroup_proto;
7180         case BPF_FUNC_get_socket_cookie:
7181                 return &bpf_get_socket_cookie_proto;
7182         case BPF_FUNC_get_socket_uid:
7183                 return &bpf_get_socket_uid_proto;
7184         case BPF_FUNC_fib_lookup:
7185                 return &bpf_skb_fib_lookup_proto;
7186         case BPF_FUNC_sk_fullsock:
7187                 return &bpf_sk_fullsock_proto;
7188         case BPF_FUNC_sk_storage_get:
7189                 return &bpf_sk_storage_get_proto;
7190         case BPF_FUNC_sk_storage_delete:
7191                 return &bpf_sk_storage_delete_proto;
7192 #ifdef CONFIG_XFRM
7193         case BPF_FUNC_skb_get_xfrm_state:
7194                 return &bpf_skb_get_xfrm_state_proto;
7195 #endif
7196 #ifdef CONFIG_CGROUP_NET_CLASSID
7197         case BPF_FUNC_skb_cgroup_classid:
7198                 return &bpf_skb_cgroup_classid_proto;
7199 #endif
7200 #ifdef CONFIG_SOCK_CGROUP_DATA
7201         case BPF_FUNC_skb_cgroup_id:
7202                 return &bpf_skb_cgroup_id_proto;
7203         case BPF_FUNC_skb_ancestor_cgroup_id:
7204                 return &bpf_skb_ancestor_cgroup_id_proto;
7205 #endif
7206 #ifdef CONFIG_INET
7207         case BPF_FUNC_sk_lookup_tcp:
7208                 return &bpf_sk_lookup_tcp_proto;
7209         case BPF_FUNC_sk_lookup_udp:
7210                 return &bpf_sk_lookup_udp_proto;
7211         case BPF_FUNC_sk_release:
7212                 return &bpf_sk_release_proto;
7213         case BPF_FUNC_tcp_sock:
7214                 return &bpf_tcp_sock_proto;
7215         case BPF_FUNC_get_listener_sock:
7216                 return &bpf_get_listener_sock_proto;
7217         case BPF_FUNC_skc_lookup_tcp:
7218                 return &bpf_skc_lookup_tcp_proto;
7219         case BPF_FUNC_tcp_check_syncookie:
7220                 return &bpf_tcp_check_syncookie_proto;
7221         case BPF_FUNC_skb_ecn_set_ce:
7222                 return &bpf_skb_ecn_set_ce_proto;
7223         case BPF_FUNC_tcp_gen_syncookie:
7224                 return &bpf_tcp_gen_syncookie_proto;
7225         case BPF_FUNC_sk_assign:
7226                 return &bpf_sk_assign_proto;
7227 #endif
7228         default:
7229                 return bpf_sk_base_func_proto(func_id);
7230         }
7231 }
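
/* Example: a minimal tc classifier exercising two of the helpers exposed
 * above. Illustrative sketch only, not part of this file; it assumes
 * clang/libbpf (<bpf/bpf_helpers.h> provides SEC() and the helper
 * declarations), and ifindex 2 is an arbitrary placeholder.
 *
 *	SEC("classifier")
 *	int redirect_via_neigh(struct __sk_buff *skb)
 *	{
 *		// Recompute the flow hash (e.g. after header rewrites
 *		// earlier in the pipeline) ...
 *		bpf_get_hash_recalc(skb);
 *		// ... then forward through the neighbour subsystem, which
 *		// resolves the L2 address for the egress device.
 *		return bpf_redirect_neigh(2, NULL, 0, 0);
 *	}
 */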
7232
7233 static const struct bpf_func_proto *
7234 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7235 {
7236         switch (func_id) {
7237         case BPF_FUNC_perf_event_output:
7238                 return &bpf_xdp_event_output_proto;
7239         case BPF_FUNC_get_smp_processor_id:
7240                 return &bpf_get_smp_processor_id_proto;
7241         case BPF_FUNC_csum_diff:
7242                 return &bpf_csum_diff_proto;
7243         case BPF_FUNC_xdp_adjust_head:
7244                 return &bpf_xdp_adjust_head_proto;
7245         case BPF_FUNC_xdp_adjust_meta:
7246                 return &bpf_xdp_adjust_meta_proto;
7247         case BPF_FUNC_redirect:
7248                 return &bpf_xdp_redirect_proto;
7249         case BPF_FUNC_redirect_map:
7250                 return &bpf_xdp_redirect_map_proto;
7251         case BPF_FUNC_xdp_adjust_tail:
7252                 return &bpf_xdp_adjust_tail_proto;
7253         case BPF_FUNC_fib_lookup:
7254                 return &bpf_xdp_fib_lookup_proto;
7255 #ifdef CONFIG_INET
7256         case BPF_FUNC_sk_lookup_udp:
7257                 return &bpf_xdp_sk_lookup_udp_proto;
7258         case BPF_FUNC_sk_lookup_tcp:
7259                 return &bpf_xdp_sk_lookup_tcp_proto;
7260         case BPF_FUNC_sk_release:
7261                 return &bpf_sk_release_proto;
7262         case BPF_FUNC_skc_lookup_tcp:
7263                 return &bpf_xdp_skc_lookup_tcp_proto;
7264         case BPF_FUNC_tcp_check_syncookie:
7265                 return &bpf_tcp_check_syncookie_proto;
7266         case BPF_FUNC_tcp_gen_syncookie:
7267                 return &bpf_tcp_gen_syncookie_proto;
7268 #endif
7269         default:
7270                 return bpf_sk_base_func_proto(func_id);
7271         }
7272 }
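
/* Example: a small XDP program using bpf_xdp_adjust_head() from the table
 * above. Sketch under the usual libbpf assumptions; the 4-byte tag it
 * strips is hypothetical.
 *
 *	SEC("xdp")
 *	int pop_tag(struct xdp_md *ctx)
 *	{
 *		// A positive delta moves ctx->data forward, shrinking the
 *		// packet; here we drop a (hypothetical) 4-byte tag.
 *		if (bpf_xdp_adjust_head(ctx, 4))
 *			return XDP_ABORTED;
 *		// The adjust invalidates packet pointers; re-derive and
 *		// re-check bounds before touching packet data.
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		if (data + 14 > data_end)	// Ethernet header
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */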
7273
7274 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
7275 const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
7276
7277 static const struct bpf_func_proto *
7278 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7279 {
7280         switch (func_id) {
7281         case BPF_FUNC_setsockopt:
7282                 return &bpf_sock_ops_setsockopt_proto;
7283         case BPF_FUNC_getsockopt:
7284                 return &bpf_sock_ops_getsockopt_proto;
7285         case BPF_FUNC_sock_ops_cb_flags_set:
7286                 return &bpf_sock_ops_cb_flags_set_proto;
7287         case BPF_FUNC_sock_map_update:
7288                 return &bpf_sock_map_update_proto;
7289         case BPF_FUNC_sock_hash_update:
7290                 return &bpf_sock_hash_update_proto;
7291         case BPF_FUNC_get_socket_cookie:
7292                 return &bpf_get_socket_cookie_sock_ops_proto;
7293         case BPF_FUNC_get_local_storage:
7294                 return &bpf_get_local_storage_proto;
7295         case BPF_FUNC_perf_event_output:
7296                 return &bpf_event_output_data_proto;
7297         case BPF_FUNC_sk_storage_get:
7298                 return &bpf_sk_storage_get_proto;
7299         case BPF_FUNC_sk_storage_delete:
7300                 return &bpf_sk_storage_delete_proto;
7301 #ifdef CONFIG_INET
7302         case BPF_FUNC_load_hdr_opt:
7303                 return &bpf_sock_ops_load_hdr_opt_proto;
7304         case BPF_FUNC_store_hdr_opt:
7305                 return &bpf_sock_ops_store_hdr_opt_proto;
7306         case BPF_FUNC_reserve_hdr_opt:
7307                 return &bpf_sock_ops_reserve_hdr_opt_proto;
7308         case BPF_FUNC_tcp_sock:
7309                 return &bpf_tcp_sock_proto;
7310 #endif /* CONFIG_INET */
7311         default:
7312                 return bpf_sk_base_func_proto(func_id);
7313         }
7314 }
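
/* Example: a sockops program combining two helpers from the table above.
 * Sketch only (libbpf assumed); switching to "reno" on connection
 * establishment is an arbitrary illustration.
 *
 *	SEC("sockops")
 *	int tune_tcp(struct bpf_sock_ops *skops)
 *	{
 *		char cong[] = "reno";
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *			// Subscribe to RTT callbacks for this connection
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_RTT_CB_FLAG);
 *			// And set a socket option from BPF context
 *			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *				       cong, sizeof(cong));
 *			break;
 *		}
 *		return 1;
 *	}
 */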
7315
7316 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
7317 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
7318
7319 static const struct bpf_func_proto *
7320 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7321 {
7322         switch (func_id) {
7323         case BPF_FUNC_msg_redirect_map:
7324                 return &bpf_msg_redirect_map_proto;
7325         case BPF_FUNC_msg_redirect_hash:
7326                 return &bpf_msg_redirect_hash_proto;
7327         case BPF_FUNC_msg_apply_bytes:
7328                 return &bpf_msg_apply_bytes_proto;
7329         case BPF_FUNC_msg_cork_bytes:
7330                 return &bpf_msg_cork_bytes_proto;
7331         case BPF_FUNC_msg_pull_data:
7332                 return &bpf_msg_pull_data_proto;
7333         case BPF_FUNC_msg_push_data:
7334                 return &bpf_msg_push_data_proto;
7335         case BPF_FUNC_msg_pop_data:
7336                 return &bpf_msg_pop_data_proto;
7337         case BPF_FUNC_perf_event_output:
7338                 return &bpf_event_output_data_proto;
7339         case BPF_FUNC_get_current_uid_gid:
7340                 return &bpf_get_current_uid_gid_proto;
7341         case BPF_FUNC_get_current_pid_tgid:
7342                 return &bpf_get_current_pid_tgid_proto;
7343         case BPF_FUNC_sk_storage_get:
7344                 return &bpf_sk_storage_get_proto;
7345         case BPF_FUNC_sk_storage_delete:
7346                 return &bpf_sk_storage_delete_proto;
7347 #ifdef CONFIG_CGROUPS
7348         case BPF_FUNC_get_current_cgroup_id:
7349                 return &bpf_get_current_cgroup_id_proto;
7350         case BPF_FUNC_get_current_ancestor_cgroup_id:
7351                 return &bpf_get_current_ancestor_cgroup_id_proto;
7352 #endif
7353 #ifdef CONFIG_CGROUP_NET_CLASSID
7354         case BPF_FUNC_get_cgroup_classid:
7355                 return &bpf_get_cgroup_classid_curr_proto;
7356 #endif
7357         default:
7358                 return bpf_sk_base_func_proto(func_id);
7359         }
7360 }
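
/* Example: an sk_msg verdict program using the apply-bytes helper above.
 * Illustrative sketch; the 128-byte window is arbitrary.
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		// Apply this verdict to the next 128 bytes only; data
 *		// beyond that re-enters the program.
 *		bpf_msg_apply_bytes(msg, 128);
 *		return SK_PASS;
 *	}
 */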
7361
7362 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
7363 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
7364
7365 static const struct bpf_func_proto *
7366 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7367 {
7368         switch (func_id) {
7369         case BPF_FUNC_skb_store_bytes:
7370                 return &bpf_skb_store_bytes_proto;
7371         case BPF_FUNC_skb_load_bytes:
7372                 return &bpf_skb_load_bytes_proto;
7373         case BPF_FUNC_skb_pull_data:
7374                 return &sk_skb_pull_data_proto;
7375         case BPF_FUNC_skb_change_tail:
7376                 return &sk_skb_change_tail_proto;
7377         case BPF_FUNC_skb_change_head:
7378                 return &sk_skb_change_head_proto;
7379         case BPF_FUNC_skb_adjust_room:
7380                 return &sk_skb_adjust_room_proto;
7381         case BPF_FUNC_get_socket_cookie:
7382                 return &bpf_get_socket_cookie_proto;
7383         case BPF_FUNC_get_socket_uid:
7384                 return &bpf_get_socket_uid_proto;
7385         case BPF_FUNC_sk_redirect_map:
7386                 return &bpf_sk_redirect_map_proto;
7387         case BPF_FUNC_sk_redirect_hash:
7388                 return &bpf_sk_redirect_hash_proto;
7389         case BPF_FUNC_perf_event_output:
7390                 return &bpf_skb_event_output_proto;
7391 #ifdef CONFIG_INET
7392         case BPF_FUNC_sk_lookup_tcp:
7393                 return &bpf_sk_lookup_tcp_proto;
7394         case BPF_FUNC_sk_lookup_udp:
7395                 return &bpf_sk_lookup_udp_proto;
7396         case BPF_FUNC_sk_release:
7397                 return &bpf_sk_release_proto;
7398         case BPF_FUNC_skc_lookup_tcp:
7399                 return &bpf_skc_lookup_tcp_proto;
7400 #endif
7401         default:
7402                 return bpf_sk_base_func_proto(func_id);
7403         }
7404 }
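
/* Example: an sk_skb stream-verdict program redirecting into a sockmap via
 * bpf_sk_redirect_map() from the table above. Sketch assuming libbpf's
 * BTF-style map definitions; key 0 is a placeholder.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} sock_map SEC(".maps");
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
 *	}
 */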
7405
7406 static const struct bpf_func_proto *
7407 flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7408 {
7409         switch (func_id) {
7410         case BPF_FUNC_skb_load_bytes:
7411                 return &bpf_flow_dissector_load_bytes_proto;
7412         default:
7413                 return bpf_sk_base_func_proto(func_id);
7414         }
7415 }
7416
7417 static const struct bpf_func_proto *
7418 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7419 {
7420         switch (func_id) {
7421         case BPF_FUNC_skb_load_bytes:
7422                 return &bpf_skb_load_bytes_proto;
7423         case BPF_FUNC_skb_pull_data:
7424                 return &bpf_skb_pull_data_proto;
7425         case BPF_FUNC_csum_diff:
7426                 return &bpf_csum_diff_proto;
7427         case BPF_FUNC_get_cgroup_classid:
7428                 return &bpf_get_cgroup_classid_proto;
7429         case BPF_FUNC_get_route_realm:
7430                 return &bpf_get_route_realm_proto;
7431         case BPF_FUNC_get_hash_recalc:
7432                 return &bpf_get_hash_recalc_proto;
7433         case BPF_FUNC_perf_event_output:
7434                 return &bpf_skb_event_output_proto;
7435         case BPF_FUNC_get_smp_processor_id:
7436                 return &bpf_get_smp_processor_id_proto;
7437         case BPF_FUNC_skb_under_cgroup:
7438                 return &bpf_skb_under_cgroup_proto;
7439         default:
7440                 return bpf_sk_base_func_proto(func_id);
7441         }
7442 }
7443
7444 static const struct bpf_func_proto *
7445 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7446 {
7447         switch (func_id) {
7448         case BPF_FUNC_lwt_push_encap:
7449                 return &bpf_lwt_in_push_encap_proto;
7450         default:
7451                 return lwt_out_func_proto(func_id, prog);
7452         }
7453 }
7454
7455 static const struct bpf_func_proto *
7456 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7457 {
7458         switch (func_id) {
7459         case BPF_FUNC_skb_get_tunnel_key:
7460                 return &bpf_skb_get_tunnel_key_proto;
7461         case BPF_FUNC_skb_set_tunnel_key:
7462                 return bpf_get_skb_set_tunnel_proto(func_id);
7463         case BPF_FUNC_skb_get_tunnel_opt:
7464                 return &bpf_skb_get_tunnel_opt_proto;
7465         case BPF_FUNC_skb_set_tunnel_opt:
7466                 return bpf_get_skb_set_tunnel_proto(func_id);
7467         case BPF_FUNC_redirect:
7468                 return &bpf_redirect_proto;
7469         case BPF_FUNC_clone_redirect:
7470                 return &bpf_clone_redirect_proto;
7471         case BPF_FUNC_skb_change_tail:
7472                 return &bpf_skb_change_tail_proto;
7473         case BPF_FUNC_skb_change_head:
7474                 return &bpf_skb_change_head_proto;
7475         case BPF_FUNC_skb_store_bytes:
7476                 return &bpf_skb_store_bytes_proto;
7477         case BPF_FUNC_csum_update:
7478                 return &bpf_csum_update_proto;
7479         case BPF_FUNC_csum_level:
7480                 return &bpf_csum_level_proto;
7481         case BPF_FUNC_l3_csum_replace:
7482                 return &bpf_l3_csum_replace_proto;
7483         case BPF_FUNC_l4_csum_replace:
7484                 return &bpf_l4_csum_replace_proto;
7485         case BPF_FUNC_set_hash_invalid:
7486                 return &bpf_set_hash_invalid_proto;
7487         case BPF_FUNC_lwt_push_encap:
7488                 return &bpf_lwt_xmit_push_encap_proto;
7489         default:
7490                 return lwt_out_func_proto(func_id, prog);
7491         }
7492 }
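
/* Example: an lwt_xmit program setting tunnel metadata with
 * bpf_skb_set_tunnel_key(), as dispatched above. Sketch only; the remote
 * address and tunnel id are placeholders.
 *
 *	SEC("lwt_xmit")
 *	int set_tunnel(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {};
 *
 *		key.remote_ipv4 = 0xac100164;	// 172.16.1.100, host order
 *		key.tunnel_id = 42;
 *		key.tunnel_ttl = 64;
 *		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *					   BPF_F_ZERO_CSUM_TX))
 *			return BPF_DROP;
 *		return BPF_OK;
 *	}
 */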
7493
7494 static const struct bpf_func_proto *
7495 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
7496 {
7497         switch (func_id) {
7498 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
7499         case BPF_FUNC_lwt_seg6_store_bytes:
7500                 return &bpf_lwt_seg6_store_bytes_proto;
7501         case BPF_FUNC_lwt_seg6_action:
7502                 return &bpf_lwt_seg6_action_proto;
7503         case BPF_FUNC_lwt_seg6_adjust_srh:
7504                 return &bpf_lwt_seg6_adjust_srh_proto;
7505 #endif
7506         default:
7507                 return lwt_out_func_proto(func_id, prog);
7508         }
7509 }
7510
7511 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
7512                                     const struct bpf_prog *prog,
7513                                     struct bpf_insn_access_aux *info)
7514 {
7515         const int size_default = sizeof(__u32);
7516
7517         if (off < 0 || off >= sizeof(struct __sk_buff))
7518                 return false;
7519
7520         /* The verifier guarantees that size > 0. */
7521         if (off % size != 0)
7522                 return false;
7523
7524         switch (off) {
7525         case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7526                 if (off + size > offsetofend(struct __sk_buff, cb[4]))
7527                         return false;
7528                 break;
7529         case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
7530         case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
7531         case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
7532         case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
7533         case bpf_ctx_range(struct __sk_buff, data):
7534         case bpf_ctx_range(struct __sk_buff, data_meta):
7535         case bpf_ctx_range(struct __sk_buff, data_end):
7536                 if (size != size_default)
7537                         return false;
7538                 break;
7539         case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
7540                 return false;
7541         case bpf_ctx_range(struct __sk_buff, tstamp):
7542                 if (size != sizeof(__u64))
7543                         return false;
7544                 break;
7545         case offsetof(struct __sk_buff, sk):
7546                 if (type == BPF_WRITE || size != sizeof(__u64))
7547                         return false;
7548                 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
7549                 break;
7550         default:
7551                 /* Narrow access is allowed only for reads, for now. */
7552                 if (type == BPF_WRITE) {
7553                         if (size != size_default)
7554                                 return false;
7555                 } else {
7556                         bpf_ctx_record_field_size(info, size_default);
7557                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
7558                                 return false;
7559                 }
7560         }
7561
7562         return true;
7563 }
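
/* Illustration of the narrow-access rule above: a program may read a
 * single byte out of a 4-byte field such as 'mark' (the verifier later
 * rewrites it into a word load plus shift/mask), while writes must cover
 * the field's full default size, subject to the per-program-type
 * wrappers below:
 *
 *	__u8 b = *((__u8 *)&skb->mark + 1);	// narrow read: allowed
 *	skb->mark = 1;				// write: full 4 bytes only
 */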
7564
7565 static bool sk_filter_is_valid_access(int off, int size,
7566                                       enum bpf_access_type type,
7567                                       const struct bpf_prog *prog,
7568                                       struct bpf_insn_access_aux *info)
7569 {
7570         switch (off) {
7571         case bpf_ctx_range(struct __sk_buff, tc_classid):
7572         case bpf_ctx_range(struct __sk_buff, data):
7573         case bpf_ctx_range(struct __sk_buff, data_meta):
7574         case bpf_ctx_range(struct __sk_buff, data_end):
7575         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7576         case bpf_ctx_range(struct __sk_buff, tstamp):
7577         case bpf_ctx_range(struct __sk_buff, wire_len):
7578                 return false;
7579         }
7580
7581         if (type == BPF_WRITE) {
7582                 switch (off) {
7583                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7584                         break;
7585                 default:
7586                         return false;
7587                 }
7588         }
7589
7590         return bpf_skb_is_valid_access(off, size, type, prog, info);
7591 }
7592
7593 static bool cg_skb_is_valid_access(int off, int size,
7594                                    enum bpf_access_type type,
7595                                    const struct bpf_prog *prog,
7596                                    struct bpf_insn_access_aux *info)
7597 {
7598         switch (off) {
7599         case bpf_ctx_range(struct __sk_buff, tc_classid):
7600         case bpf_ctx_range(struct __sk_buff, data_meta):
7601         case bpf_ctx_range(struct __sk_buff, wire_len):
7602                 return false;
7603         case bpf_ctx_range(struct __sk_buff, data):
7604         case bpf_ctx_range(struct __sk_buff, data_end):
7605                 if (!bpf_capable())
7606                         return false;
7607                 break;
7608         }
7609
7610         if (type == BPF_WRITE) {
7611                 switch (off) {
7612                 case bpf_ctx_range(struct __sk_buff, mark):
7613                 case bpf_ctx_range(struct __sk_buff, priority):
7614                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7615                         break;
7616                 case bpf_ctx_range(struct __sk_buff, tstamp):
7617                         if (!bpf_capable())
7618                                 return false;
7619                         break;
7620                 default:
7621                         return false;
7622                 }
7623         }
7624
7625         switch (off) {
7626         case bpf_ctx_range(struct __sk_buff, data):
7627                 info->reg_type = PTR_TO_PACKET;
7628                 break;
7629         case bpf_ctx_range(struct __sk_buff, data_end):
7630                 info->reg_type = PTR_TO_PACKET_END;
7631                 break;
7632         }
7633
7634         return bpf_skb_is_valid_access(off, size, type, prog, info);
7635 }
7636
7637 static bool lwt_is_valid_access(int off, int size,
7638                                 enum bpf_access_type type,
7639                                 const struct bpf_prog *prog,
7640                                 struct bpf_insn_access_aux *info)
7641 {
7642         switch (off) {
7643         case bpf_ctx_range(struct __sk_buff, tc_classid):
7644         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7645         case bpf_ctx_range(struct __sk_buff, data_meta):
7646         case bpf_ctx_range(struct __sk_buff, tstamp):
7647         case bpf_ctx_range(struct __sk_buff, wire_len):
7648                 return false;
7649         }
7650
7651         if (type == BPF_WRITE) {
7652                 switch (off) {
7653                 case bpf_ctx_range(struct __sk_buff, mark):
7654                 case bpf_ctx_range(struct __sk_buff, priority):
7655                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7656                         break;
7657                 default:
7658                         return false;
7659                 }
7660         }
7661
7662         switch (off) {
7663         case bpf_ctx_range(struct __sk_buff, data):
7664                 info->reg_type = PTR_TO_PACKET;
7665                 break;
7666         case bpf_ctx_range(struct __sk_buff, data_end):
7667                 info->reg_type = PTR_TO_PACKET_END;
7668                 break;
7669         }
7670
7671         return bpf_skb_is_valid_access(off, size, type, prog, info);
7672 }
7673
7674 /* Attach type specific accesses */
7675 static bool __sock_filter_check_attach_type(int off,
7676                                             enum bpf_access_type access_type,
7677                                             enum bpf_attach_type attach_type)
7678 {
7679         switch (off) {
7680         case offsetof(struct bpf_sock, bound_dev_if):
7681         case offsetof(struct bpf_sock, mark):
7682         case offsetof(struct bpf_sock, priority):
7683                 switch (attach_type) {
7684                 case BPF_CGROUP_INET_SOCK_CREATE:
7685                 case BPF_CGROUP_INET_SOCK_RELEASE:
7686                         goto full_access;
7687                 default:
7688                         return false;
7689                 }
7690         case bpf_ctx_range(struct bpf_sock, src_ip4):
7691                 switch (attach_type) {
7692                 case BPF_CGROUP_INET4_POST_BIND:
7693                         goto read_only;
7694                 default:
7695                         return false;
7696                 }
7697         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7698                 switch (attach_type) {
7699                 case BPF_CGROUP_INET6_POST_BIND:
7700                         goto read_only;
7701                 default:
7702                         return false;
7703                 }
7704         case bpf_ctx_range(struct bpf_sock, src_port):
7705                 switch (attach_type) {
7706                 case BPF_CGROUP_INET4_POST_BIND:
7707                 case BPF_CGROUP_INET6_POST_BIND:
7708                         goto read_only;
7709                 default:
7710                         return false;
7711                 }
7712         }
7713 read_only:
7714         return access_type == BPF_READ;
7715 full_access:
7716         return true;
7717 }
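
/* Example: a cgroup sock program hitting the full-access branch above.
 * Sketch only; SEC("cgroup/sock") is libbpf's name for
 * BPF_CGROUP_INET_SOCK_CREATE, and the field values are placeholders.
 *
 *	SEC("cgroup/sock")
 *	int on_sock_create(struct bpf_sock *sk)
 *	{
 *		sk->bound_dev_if = 1;	// pin new sockets to ifindex 1
 *		sk->mark = 0x2a;
 *		sk->priority = 6;
 *		return 1;		// allow socket creation
 *	}
 */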
7718
7719 bool bpf_sock_common_is_valid_access(int off, int size,
7720                                      enum bpf_access_type type,
7721                                      struct bpf_insn_access_aux *info)
7722 {
7723         switch (off) {
7724         case bpf_ctx_range_till(struct bpf_sock, type, priority):
7725                 return false;
7726         default:
7727                 return bpf_sock_is_valid_access(off, size, type, info);
7728         }
7729 }
7730
7731 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
7732                               struct bpf_insn_access_aux *info)
7733 {
7734         const int size_default = sizeof(__u32);
7735
7736         if (off < 0 || off >= sizeof(struct bpf_sock))
7737                 return false;
7738         if (off % size != 0)
7739                 return false;
7740
7741         switch (off) {
7742         case offsetof(struct bpf_sock, state):
7743         case offsetof(struct bpf_sock, family):
7744         case offsetof(struct bpf_sock, type):
7745         case offsetof(struct bpf_sock, protocol):
7746         case offsetof(struct bpf_sock, dst_port):
7747         case offsetof(struct bpf_sock, src_port):
7748         case offsetof(struct bpf_sock, rx_queue_mapping):
7749         case bpf_ctx_range(struct bpf_sock, src_ip4):
7750         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7751         case bpf_ctx_range(struct bpf_sock, dst_ip4):
7752         case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7753                 bpf_ctx_record_field_size(info, size_default);
7754                 return bpf_ctx_narrow_access_ok(off, size, size_default);
7755         }
7756
7757         return size == size_default;
7758 }
7759
7760 static bool sock_filter_is_valid_access(int off, int size,
7761                                         enum bpf_access_type type,
7762                                         const struct bpf_prog *prog,
7763                                         struct bpf_insn_access_aux *info)
7764 {
7765         if (!bpf_sock_is_valid_access(off, size, type, info))
7766                 return false;
7767         return __sock_filter_check_attach_type(off, type,
7768                                                prog->expected_attach_type);
7769 }
7770
7771 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
7772                              const struct bpf_prog *prog)
7773 {
7774         /* Neither direct read nor direct write requires any preliminary
7775          * action.
7776          */
7777         return 0;
7778 }
7779
7780 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
7781                                 const struct bpf_prog *prog, int drop_verdict)
7782 {
7783         struct bpf_insn *insn = insn_buf;
7784
7785         if (!direct_write)
7786                 return 0;
7787
7788         /* if (!skb->cloned)
7789          *       goto start;
7790          *
7791          * (Fast path; otherwise we conservatively assume this skb
7792          *  might be a clone and do the rest in the helper.)
7793          */
7794         *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
7795         *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
7796         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
7797
7798         /* ret = bpf_skb_pull_data(skb, 0); */
7799         *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
7800         *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
7801         *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7802                                BPF_FUNC_skb_pull_data);
7803         /* if (!ret)
7804          *      goto restore;
7805          * return TC_ACT_SHOT;
7806          */
7807         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
7808         *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
7809         *insn++ = BPF_EXIT_INSN();
7810
7811         /* restore: */
7812         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
7813         /* start: */
7814         *insn++ = prog->insnsi[0];
7815
7816         return insn - insn_buf;
7817 }
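
/* The instruction sequence emitted above corresponds roughly to this C:
 *
 *	if (skb->cloned) {
 *		if (bpf_skb_pull_data(skb, 0))
 *			return drop_verdict;	// TC_ACT_SHOT or SK_DROP
 *	}
 *	// fall through to the program's original first instruction
 */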
7818
7819 static int bpf_gen_ld_abs(const struct bpf_insn *orig,
7820                           struct bpf_insn *insn_buf)
7821 {
7822         bool indirect = BPF_MODE(orig->code) == BPF_IND;
7823         struct bpf_insn *insn = insn_buf;
7824
7825         if (!indirect) {
7826                 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
7827         } else {
7828                 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
7829                 if (orig->imm)
7830                         *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
7831         }
7832         /* We're guaranteed here that CTX is in R6. */
7833         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
7834
7835         switch (BPF_SIZE(orig->code)) {
7836         case BPF_B:
7837                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
7838                 break;
7839         case BPF_H:
7840                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
7841                 break;
7842         case BPF_W:
7843                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
7844                 break;
7845         }
7846
7847         *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
7848         *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
7849         *insn++ = BPF_EXIT_INSN();
7850
7851         return insn - insn_buf;
7852 }
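
/* For reference, LD_ABS/LD_IND originate from classic BPF filters such as
 * this canonical "ARP only" program (a userspace sketch; the ethertype
 * sits at offset 12). After cBPF->eBPF migration, such loads reach
 * bpf_gen_ld_abs() and are expanded into the helper calls above:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept
 *		BPF_STMT(BPF_RET | BPF_K, 0),		// drop
 *	};
 */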
7853
7854 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
7855                                const struct bpf_prog *prog)
7856 {
7857         return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
7858 }
7859
7860 static bool tc_cls_act_is_valid_access(int off, int size,
7861                                        enum bpf_access_type type,
7862                                        const struct bpf_prog *prog,
7863                                        struct bpf_insn_access_aux *info)
7864 {
7865         if (type == BPF_WRITE) {
7866                 switch (off) {
7867                 case bpf_ctx_range(struct __sk_buff, mark):
7868                 case bpf_ctx_range(struct __sk_buff, tc_index):
7869                 case bpf_ctx_range(struct __sk_buff, priority):
7870                 case bpf_ctx_range(struct __sk_buff, tc_classid):
7871                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
7872                 case bpf_ctx_range(struct __sk_buff, tstamp):
7873                 case bpf_ctx_range(struct __sk_buff, queue_mapping):
7874                         break;
7875                 default:
7876                         return false;
7877                 }
7878         }
7879
7880         switch (off) {
7881         case bpf_ctx_range(struct __sk_buff, data):
7882                 info->reg_type = PTR_TO_PACKET;
7883                 break;
7884         case bpf_ctx_range(struct __sk_buff, data_meta):
7885                 info->reg_type = PTR_TO_PACKET_META;
7886                 break;
7887         case bpf_ctx_range(struct __sk_buff, data_end):
7888                 info->reg_type = PTR_TO_PACKET_END;
7889                 break;
7890         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
7891                 return false;
7892         }
7893
7894         return bpf_skb_is_valid_access(off, size, type, prog, info);
7895 }
7896
7897 static bool __is_valid_xdp_access(int off, int size)
7898 {
7899         if (off < 0 || off >= sizeof(struct xdp_md))
7900                 return false;
7901         if (off % size != 0)
7902                 return false;
7903         if (size != sizeof(__u32))
7904                 return false;
7905
7906         return true;
7907 }
7908
7909 static bool xdp_is_valid_access(int off, int size,
7910                                 enum bpf_access_type type,
7911                                 const struct bpf_prog *prog,
7912                                 struct bpf_insn_access_aux *info)
7913 {
7914         if (prog->expected_attach_type != BPF_XDP_DEVMAP) {
7915                 switch (off) {
7916                 case offsetof(struct xdp_md, egress_ifindex):
7917                         return false;
7918                 }
7919         }
7920
7921         if (type == BPF_WRITE) {
7922                 if (bpf_prog_is_dev_bound(prog->aux)) {
7923                         switch (off) {
7924                         case offsetof(struct xdp_md, rx_queue_index):
7925                                 return __is_valid_xdp_access(off, size);
7926                         }
7927                 }
7928                 return false;
7929         }
7930
7931         switch (off) {
7932         case offsetof(struct xdp_md, data):
7933                 info->reg_type = PTR_TO_PACKET;
7934                 break;
7935         case offsetof(struct xdp_md, data_meta):
7936                 info->reg_type = PTR_TO_PACKET_META;
7937                 break;
7938         case offsetof(struct xdp_md, data_end):
7939                 info->reg_type = PTR_TO_PACKET_END;
7940                 break;
7941         }
7942
7943         return __is_valid_xdp_access(off, size);
7944 }
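
/* Example: reading egress_ifindex, which the check above reserves for
 * programs loaded with expected_attach_type == BPF_XDP_DEVMAP. Sketch;
 * libbpf's "xdp_devmap" section naming is assumed to select that attach
 * type.
 *
 *	SEC("xdp_devmap")
 *	int on_devmap_egress(struct xdp_md *ctx)
 *	{
 *		if (ctx->egress_ifindex == 0)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */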
7945
7946 void bpf_warn_invalid_xdp_action(u32 act)
7947 {
7948         const u32 act_max = XDP_REDIRECT;
7949
7950         WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
7951                   act > act_max ? "Illegal" : "Driver unsupported",
7952                   act);
7953 }
7954 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
7955
7956 static bool sock_addr_is_valid_access(int off, int size,
7957                                       enum bpf_access_type type,
7958                                       const struct bpf_prog *prog,
7959                                       struct bpf_insn_access_aux *info)
7960 {
7961         const int size_default = sizeof(__u32);
7962
7963         if (off < 0 || off >= sizeof(struct bpf_sock_addr))
7964                 return false;
7965         if (off % size != 0)
7966                 return false;
7967
7968         /* Disallow access to IPv6 fields from IPv4 context and vice
7969          * versa.
7970          */
7971         switch (off) {
7972         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
7973                 switch (prog->expected_attach_type) {
7974                 case BPF_CGROUP_INET4_BIND:
7975                 case BPF_CGROUP_INET4_CONNECT:
7976                 case BPF_CGROUP_INET4_GETPEERNAME:
7977                 case BPF_CGROUP_INET4_GETSOCKNAME:
7978                 case BPF_CGROUP_UDP4_SENDMSG:
7979                 case BPF_CGROUP_UDP4_RECVMSG:
7980                         break;
7981                 default:
7982                         return false;
7983                 }
7984                 break;
7985         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7986                 switch (prog->expected_attach_type) {
7987                 case BPF_CGROUP_INET6_BIND:
7988                 case BPF_CGROUP_INET6_CONNECT:
7989                 case BPF_CGROUP_INET6_GETPEERNAME:
7990                 case BPF_CGROUP_INET6_GETSOCKNAME:
7991                 case BPF_CGROUP_UDP6_SENDMSG:
7992                 case BPF_CGROUP_UDP6_RECVMSG:
7993                         break;
7994                 default:
7995                         return false;
7996                 }
7997                 break;
7998         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
7999                 switch (prog->expected_attach_type) {
8000                 case BPF_CGROUP_UDP4_SENDMSG:
8001                         break;
8002                 default:
8003                         return false;
8004                 }
8005                 break;
8006         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
8007                                 msg_src_ip6[3]):
8008                 switch (prog->expected_attach_type) {
8009                 case BPF_CGROUP_UDP6_SENDMSG:
8010                         break;
8011                 default:
8012                         return false;
8013                 }
8014                 break;
8015         }
8016
8017         switch (off) {
8018         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
8019         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
8020         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
8021         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
8022                                 msg_src_ip6[3]):
8023         case bpf_ctx_range(struct bpf_sock_addr, user_port):
8024                 if (type == BPF_READ) {
8025                         bpf_ctx_record_field_size(info, size_default);
8026
8027                         if (bpf_ctx_wide_access_ok(off, size,
8028                                                    struct bpf_sock_addr,
8029                                                    user_ip6))
8030                                 return true;
8031
8032                         if (bpf_ctx_wide_access_ok(off, size,
8033                                                    struct bpf_sock_addr,
8034                                                    msg_src_ip6))
8035                                 return true;
8036
8037                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
8038                                 return false;
8039                 } else {
8040                         if (bpf_ctx_wide_access_ok(off, size,
8041                                                    struct bpf_sock_addr,
8042                                                    user_ip6))
8043                                 return true;
8044
8045                         if (bpf_ctx_wide_access_ok(off, size,
8046                                                    struct bpf_sock_addr,
8047                                                    msg_src_ip6))
8048                                 return true;
8049
8050                         if (size != size_default)
8051                                 return false;
8052                 }
8053                 break;
8054         case offsetof(struct bpf_sock_addr, sk):
8055                 if (type != BPF_READ)
8056                         return false;
8057                 if (size != sizeof(__u64))
8058                         return false;
8059                 info->reg_type = PTR_TO_SOCKET;
8060                 break;
8061         default:
8062                 if (type == BPF_READ) {
8063                         if (size != size_default)
8064                                 return false;
8065                 } else {
8066                         return false;
8067                 }
8068         }
8069
8070         return true;
8071 }
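
/* Example: a connect4 program rewriting the destination, which the checks
 * above permit (user_ip4/user_port are writable for this attach type).
 * Sketch; 127.0.0.1:4040 is a placeholder, and bpf_htonl()/bpf_htons()
 * come from libbpf's <bpf/bpf_endian.h>.
 *
 *	SEC("cgroup/connect4")
 *	int connect4(struct bpf_sock_addr *ctx)
 *	{
 *		ctx->user_ip4 = bpf_htonl(0x7f000001);	// 127.0.0.1
 *		ctx->user_port = bpf_htons(4040);
 *		return 1;	// proceed with the (rewritten) connect
 *	}
 */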
8072
8073 static bool sock_ops_is_valid_access(int off, int size,
8074                                      enum bpf_access_type type,
8075                                      const struct bpf_prog *prog,
8076                                      struct bpf_insn_access_aux *info)
8077 {
8078         const int size_default = sizeof(__u32);
8079
8080         if (off < 0 || off >= sizeof(struct bpf_sock_ops))
8081                 return false;
8082
8083         /* The verifier guarantees that size > 0. */
8084         if (off % size != 0)
8085                 return false;
8086
8087         if (type == BPF_WRITE) {
8088                 switch (off) {
8089                 case offsetof(struct bpf_sock_ops, reply):
8090                 case offsetof(struct bpf_sock_ops, sk_txhash):
8091                         if (size != size_default)
8092                                 return false;
8093                         break;
8094                 default:
8095                         return false;
8096                 }
8097         } else {
8098                 switch (off) {
8099                 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
8100                                         bytes_acked):
8101                         if (size != sizeof(__u64))
8102                                 return false;
8103                         break;
8104                 case offsetof(struct bpf_sock_ops, sk):
8105                         if (size != sizeof(__u64))
8106                                 return false;
8107                         info->reg_type = PTR_TO_SOCKET_OR_NULL;
8108                         break;
8109                 case offsetof(struct bpf_sock_ops, skb_data):
8110                         if (size != sizeof(__u64))
8111                                 return false;
8112                         info->reg_type = PTR_TO_PACKET;
8113                         break;
8114                 case offsetof(struct bpf_sock_ops, skb_data_end):
8115                         if (size != sizeof(__u64))
8116                                 return false;
8117                         info->reg_type = PTR_TO_PACKET_END;
8118                         break;
8119                 case offsetof(struct bpf_sock_ops, skb_tcp_flags):
8120                         bpf_ctx_record_field_size(info, size_default);
8121                         return bpf_ctx_narrow_access_ok(off, size,
8122                                                         size_default);
8123                 default:
8124                         if (size != size_default)
8125                                 return false;
8126                         break;
8127                 }
8128         }
8129
8130         return true;
8131 }
8132
8133 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
8134                            const struct bpf_prog *prog)
8135 {
8136         return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
8137 }
8138
8139 static bool sk_skb_is_valid_access(int off, int size,
8140                                    enum bpf_access_type type,
8141                                    const struct bpf_prog *prog,
8142                                    struct bpf_insn_access_aux *info)
8143 {
8144         switch (off) {
8145         case bpf_ctx_range(struct __sk_buff, tc_classid):
8146         case bpf_ctx_range(struct __sk_buff, data_meta):
8147         case bpf_ctx_range(struct __sk_buff, tstamp):
8148         case bpf_ctx_range(struct __sk_buff, wire_len):
8149                 return false;
8150         }
8151
8152         if (type == BPF_WRITE) {
8153                 switch (off) {
8154                 case bpf_ctx_range(struct __sk_buff, tc_index):
8155                 case bpf_ctx_range(struct __sk_buff, priority):
8156                         break;
8157                 default:
8158                         return false;
8159                 }
8160         }
8161
8162         switch (off) {
8163         case bpf_ctx_range(struct __sk_buff, mark):
8164                 return false;
8165         case bpf_ctx_range(struct __sk_buff, data):
8166                 info->reg_type = PTR_TO_PACKET;
8167                 break;
8168         case bpf_ctx_range(struct __sk_buff, data_end):
8169                 info->reg_type = PTR_TO_PACKET_END;
8170                 break;
8171         }
8172
8173         return bpf_skb_is_valid_access(off, size, type, prog, info);
8174 }
8175
8176 static bool sk_msg_is_valid_access(int off, int size,
8177                                    enum bpf_access_type type,
8178                                    const struct bpf_prog *prog,
8179                                    struct bpf_insn_access_aux *info)
8180 {
8181         if (type == BPF_WRITE)
8182                 return false;
8183
8184         if (off % size != 0)
8185                 return false;
8186
8187         switch (off) {
8188         case offsetof(struct sk_msg_md, data):
8189                 info->reg_type = PTR_TO_PACKET;
8190                 if (size != sizeof(__u64))
8191                         return false;
8192                 break;
8193         case offsetof(struct sk_msg_md, data_end):
8194                 info->reg_type = PTR_TO_PACKET_END;
8195                 if (size != sizeof(__u64))
8196                         return false;
8197                 break;
8198         case offsetof(struct sk_msg_md, sk):
8199                 if (size != sizeof(__u64))
8200                         return false;
8201                 info->reg_type = PTR_TO_SOCKET;
8202                 break;
8203         case bpf_ctx_range(struct sk_msg_md, family):
8204         case bpf_ctx_range(struct sk_msg_md, remote_ip4):
8205         case bpf_ctx_range(struct sk_msg_md, local_ip4):
8206         case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
8207         case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
8208         case bpf_ctx_range(struct sk_msg_md, remote_port):
8209         case bpf_ctx_range(struct sk_msg_md, local_port):
8210         case bpf_ctx_range(struct sk_msg_md, size):
8211                 if (size != sizeof(__u32))
8212                         return false;
8213                 break;
8214         default:
8215                 return false;
8216         }
8217         return true;
8218 }
8219
8220 static bool flow_dissector_is_valid_access(int off, int size,
8221                                            enum bpf_access_type type,
8222                                            const struct bpf_prog *prog,
8223                                            struct bpf_insn_access_aux *info)
8224 {
8225         const int size_default = sizeof(__u32);
8226
8227         if (off < 0 || off >= sizeof(struct __sk_buff))
8228                 return false;
8229
8230         if (type == BPF_WRITE)
8231                 return false;
8232
8233         switch (off) {
8234         case bpf_ctx_range(struct __sk_buff, data):
8235                 if (size != size_default)
8236                         return false;
8237                 info->reg_type = PTR_TO_PACKET;
8238                 return true;
8239         case bpf_ctx_range(struct __sk_buff, data_end):
8240                 if (size != size_default)
8241                         return false;
8242                 info->reg_type = PTR_TO_PACKET_END;
8243                 return true;
8244         case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
8245                 if (size != sizeof(__u64))
8246                         return false;
8247                 info->reg_type = PTR_TO_FLOW_KEYS;
8248                 return true;
8249         default:
8250                 return false;
8251         }
8252 }
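
/* Example: the minimal shape of a flow-dissector program under the access
 * rules above (only data, data_end and flow_keys are readable).
 * Illustration only; a real dissector parses headers starting at
 * keys->nhoff and fills in the remaining fields.
 *
 *	SEC("flow_dissector")
 *	int dissect(struct __sk_buff *skb)
 *	{
 *		struct bpf_flow_keys *keys = skb->flow_keys;
 *
 *		keys->addr_proto = ETH_P_IP;	// claims IPv4 ...
 *		keys->ip_proto = IPPROTO_TCP;	// ... over TCP
 *		return BPF_OK;
 *	}
 */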
8253
8254 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
8255                                              const struct bpf_insn *si,
8256                                              struct bpf_insn *insn_buf,
8257                                              struct bpf_prog *prog,
8258                                              u32 *target_size)
8260 {
8261         struct bpf_insn *insn = insn_buf;
8262
8263         switch (si->off) {
8264         case offsetof(struct __sk_buff, data):
8265                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
8266                                       si->dst_reg, si->src_reg,
8267                                       offsetof(struct bpf_flow_dissector, data));
8268                 break;
8269
8270         case offsetof(struct __sk_buff, data_end):
8271                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
8272                                       si->dst_reg, si->src_reg,
8273                                       offsetof(struct bpf_flow_dissector, data_end));
8274                 break;
8275
8276         case offsetof(struct __sk_buff, flow_keys):
8277                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
8278                                       si->dst_reg, si->src_reg,
8279                                       offsetof(struct bpf_flow_dissector, flow_keys));
8280                 break;
8281         }
8282
8283         return insn - insn_buf;
8284 }
8285
8286 static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
8287                                                   struct bpf_insn *insn)
8288 {
8289         /* si->dst_reg = skb_shinfo(SKB); */
8290 #ifdef NET_SKBUFF_DATA_USES_OFFSET
8291         *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
8292                               BPF_REG_AX, si->src_reg,
8293                               offsetof(struct sk_buff, end));
8294         *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
8295                               si->dst_reg, si->src_reg,
8296                               offsetof(struct sk_buff, head));
8297         *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
8298 #else
8299         *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
8300                               si->dst_reg, si->src_reg,
8301                               offsetof(struct sk_buff, end));
8302 #endif
8303
8304         return insn;
8305 }
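
/* Worked example of the ctx rewriting performed below: a program access
 * such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *
 * is rewritten at verification time into a load from the real object,
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 *
 * so struct __sk_buff itself is never materialized; every field access is
 * remapped onto struct sk_buff (or onto state stashed in skb->cb[]).
 */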
8306
8307 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
8308                                   const struct bpf_insn *si,
8309                                   struct bpf_insn *insn_buf,
8310                                   struct bpf_prog *prog, u32 *target_size)
8311 {
8312         struct bpf_insn *insn = insn_buf;
8313         int off;
8314
8315         switch (si->off) {
8316         case offsetof(struct __sk_buff, len):
8317                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8318                                       bpf_target_off(struct sk_buff, len, 4,
8319                                                      target_size));
8320                 break;
8321
8322         case offsetof(struct __sk_buff, protocol):
8323                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8324                                       bpf_target_off(struct sk_buff, protocol, 2,
8325                                                      target_size));
8326                 break;
8327
8328         case offsetof(struct __sk_buff, vlan_proto):
8329                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8330                                       bpf_target_off(struct sk_buff, vlan_proto, 2,
8331                                                      target_size));
8332                 break;
8333
8334         case offsetof(struct __sk_buff, priority):
8335                 if (type == BPF_WRITE)
8336                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8337                                               bpf_target_off(struct sk_buff, priority, 4,
8338                                                              target_size));
8339                 else
8340                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8341                                               bpf_target_off(struct sk_buff, priority, 4,
8342                                                              target_size));
8343                 break;
8344
8345         case offsetof(struct __sk_buff, ingress_ifindex):
8346                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8347                                       bpf_target_off(struct sk_buff, skb_iif, 4,
8348                                                      target_size));
8349                 break;
8350
8351         case offsetof(struct __sk_buff, ifindex):
8352                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
8353                                       si->dst_reg, si->src_reg,
8354                                       offsetof(struct sk_buff, dev));
8355                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
8356                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8357                                       bpf_target_off(struct net_device, ifindex, 4,
8358                                                      target_size));
8359                 break;
8360
8361         case offsetof(struct __sk_buff, hash):
8362                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8363                                       bpf_target_off(struct sk_buff, hash, 4,
8364                                                      target_size));
8365                 break;
8366
8367         case offsetof(struct __sk_buff, mark):
8368                 if (type == BPF_WRITE)
8369                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8370                                               bpf_target_off(struct sk_buff, mark, 4,
8371                                                              target_size));
8372                 else
8373                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8374                                               bpf_target_off(struct sk_buff, mark, 4,
8375                                                              target_size));
8376                 break;
8377
8378         case offsetof(struct __sk_buff, pkt_type):
8379                 *target_size = 1;
8380                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
8381                                       PKT_TYPE_OFFSET());
8382                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
8383 #ifdef __BIG_ENDIAN_BITFIELD
8384                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
8385 #endif
8386                 break;
8387
8388         case offsetof(struct __sk_buff, queue_mapping):
8389                 if (type == BPF_WRITE) {
8390                         *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
8391                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
8392                                               bpf_target_off(struct sk_buff,
8393                                                              queue_mapping,
8394                                                              2, target_size));
8395                 } else {
8396                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8397                                               bpf_target_off(struct sk_buff,
8398                                                              queue_mapping,
8399                                                              2, target_size));
8400                 }
8401                 break;
8402
8403         case offsetof(struct __sk_buff, vlan_present):
8404                 *target_size = 1;
8405                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
8406                                       PKT_VLAN_PRESENT_OFFSET());
8407                 if (PKT_VLAN_PRESENT_BIT)
8408                         *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
8409                 if (PKT_VLAN_PRESENT_BIT < 7)
8410                         *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
8411                 break;
8412
8413         case offsetof(struct __sk_buff, vlan_tci):
8414                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8415                                       bpf_target_off(struct sk_buff, vlan_tci, 2,
8416                                                      target_size));
8417                 break;
8418
8419         case offsetof(struct __sk_buff, cb[0]) ...
8420              offsetofend(struct __sk_buff, cb[4]) - 1:
8421                 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
8422                 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
8423                               offsetof(struct qdisc_skb_cb, data)) %
8424                              sizeof(__u64));
8425
8426                 prog->cb_access = 1;
8427                 off  = si->off;
8428                 off -= offsetof(struct __sk_buff, cb[0]);
8429                 off += offsetof(struct sk_buff, cb);
8430                 off += offsetof(struct qdisc_skb_cb, data);
8431                 if (type == BPF_WRITE)
8432                         *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
8433                                               si->src_reg, off);
8434                 else
8435                         *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
8436                                               si->src_reg, off);
8437                 break;
8438
8439         case offsetof(struct __sk_buff, tc_classid):
8440                 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);
8441
8442                 off  = si->off;
8443                 off -= offsetof(struct __sk_buff, tc_classid);
8444                 off += offsetof(struct sk_buff, cb);
8445                 off += offsetof(struct qdisc_skb_cb, tc_classid);
8446                 *target_size = 2;
8447                 if (type == BPF_WRITE)
8448                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
8449                                               si->src_reg, off);
8450                 else
8451                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
8452                                               si->src_reg, off);
8453                 break;
8454
8455         case offsetof(struct __sk_buff, data):
8456                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
8457                                       si->dst_reg, si->src_reg,
8458                                       offsetof(struct sk_buff, data));
8459                 break;
8460
8461         case offsetof(struct __sk_buff, data_meta):
8462                 off  = si->off;
8463                 off -= offsetof(struct __sk_buff, data_meta);
8464                 off += offsetof(struct sk_buff, cb);
8465                 off += offsetof(struct bpf_skb_data_end, data_meta);
8466                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8467                                       si->src_reg, off);
8468                 break;
8469
8470         case offsetof(struct __sk_buff, data_end):
8471                 off  = si->off;
8472                 off -= offsetof(struct __sk_buff, data_end);
8473                 off += offsetof(struct sk_buff, cb);
8474                 off += offsetof(struct bpf_skb_data_end, data_end);
8475                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8476                                       si->src_reg, off);
8477                 break;
8478
8479         case offsetof(struct __sk_buff, tc_index):
8480 #ifdef CONFIG_NET_SCHED
8481                 if (type == BPF_WRITE)
8482                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
8483                                               bpf_target_off(struct sk_buff, tc_index, 2,
8484                                                              target_size));
8485                 else
8486                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
8487                                               bpf_target_off(struct sk_buff, tc_index, 2,
8488                                                              target_size));
8489 #else
8490                 *target_size = 2;
8491                 if (type == BPF_WRITE)
8492                         *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
8493                 else
8494                         *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
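                /* Without CONFIG_NET_SCHED there is no tc_index to access:
                 * writes degrade to a reg-to-itself no-op move and reads
                 * simply return 0.
                 */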
8495 #endif
8496                 break;
8497
8498         case offsetof(struct __sk_buff, napi_id):
8499 #if defined(CONFIG_NET_RX_BUSY_POLL)
8500                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8501                                       bpf_target_off(struct sk_buff, napi_id, 4,
8502                                                      target_size));
8503                 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
8504                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
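                /* The JGE/MOV pair above hides internal sentinel ids: any
                 * napi_id below MIN_NAPI_ID is rewritten to 0 before the
                 * program can observe it.
                 */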
8505 #else
8506                 *target_size = 4;
8507                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
8508 #endif
8509                 break;
8510         case offsetof(struct __sk_buff, family):
8511                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
8512
8513                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8514                                       si->dst_reg, si->src_reg,
8515                                       offsetof(struct sk_buff, sk));
8516                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8517                                       bpf_target_off(struct sock_common,
8518                                                      skc_family,
8519                                                      2, target_size));
8520                 break;
8521         case offsetof(struct __sk_buff, remote_ip4):
8522                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
8523
8524                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8525                                       si->dst_reg, si->src_reg,
8526                                       offsetof(struct sk_buff, sk));
8527                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8528                                       bpf_target_off(struct sock_common,
8529                                                      skc_daddr,
8530                                                      4, target_size));
8531                 break;
8532         case offsetof(struct __sk_buff, local_ip4):
8533                 BUILD_BUG_ON(sizeof_field(struct sock_common,
8534                                           skc_rcv_saddr) != 4);
8535
8536                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8537                                       si->dst_reg, si->src_reg,
8538                                       offsetof(struct sk_buff, sk));
8539                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8540                                       bpf_target_off(struct sock_common,
8541                                                      skc_rcv_saddr,
8542                                                      4, target_size));
8543                 break;
8544         case offsetof(struct __sk_buff, remote_ip6[0]) ...
8545              offsetof(struct __sk_buff, remote_ip6[3]):
8546 #if IS_ENABLED(CONFIG_IPV6)
8547                 BUILD_BUG_ON(sizeof_field(struct sock_common,
8548                                           skc_v6_daddr.s6_addr32[0]) != 4);
8549
8550                 off = si->off;
8551                 off -= offsetof(struct __sk_buff, remote_ip6[0]);
8552
8553                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8554                                       si->dst_reg, si->src_reg,
8555                                       offsetof(struct sk_buff, sk));
8556                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8557                                       offsetof(struct sock_common,
8558                                                skc_v6_daddr.s6_addr32[0]) +
8559                                       off);
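                /* Illustrative mapping: an access to ctx->remote_ip6[2]
                 * leaves off == 8, so the load above lands on
                 * skc_v6_daddr.s6_addr32[2]; the ranged case label lets one
                 * rewrite serve all four 32-bit words of the address.
                 */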
8560 #else
8561                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8562 #endif
8563                 break;
8564         case offsetof(struct __sk_buff, local_ip6[0]) ...
8565              offsetof(struct __sk_buff, local_ip6[3]):
8566 #if IS_ENABLED(CONFIG_IPV6)
8567                 BUILD_BUG_ON(sizeof_field(struct sock_common,
8568                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8569
8570                 off = si->off;
8571                 off -= offsetof(struct __sk_buff, local_ip6[0]);
8572
8573                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8574                                       si->dst_reg, si->src_reg,
8575                                       offsetof(struct sk_buff, sk));
8576                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8577                                       offsetof(struct sock_common,
8578                                                skc_v6_rcv_saddr.s6_addr32[0]) +
8579                                       off);
8580 #else
8581                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8582 #endif
8583                 break;
8584
8585         case offsetof(struct __sk_buff, remote_port):
8586                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
8587
8588                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8589                                       si->dst_reg, si->src_reg,
8590                                       offsetof(struct sk_buff, sk));
8591                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8592                                       bpf_target_off(struct sock_common,
8593                                                      skc_dport,
8594                                                      2, target_size));
8595 #ifndef __BIG_ENDIAN_BITFIELD
8596                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8597 #endif
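                /* skc_dport holds the 16-bit port in network byte order; the
                 * conditional shift above gives the 32-bit result the same
                 * byte layout on little-endian hosts as on big-endian ones,
                 * so remote_port can uniformly be treated as a
                 * network-byte-order u32.
                 */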
8598                 break;
8599
8600         case offsetof(struct __sk_buff, local_port):
8601                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
8602
8603                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8604                                       si->dst_reg, si->src_reg,
8605                                       offsetof(struct sk_buff, sk));
8606                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8607                                       bpf_target_off(struct sock_common,
8608                                                      skc_num, 2, target_size));
8609                 break;
8610
8611         case offsetof(struct __sk_buff, tstamp):
8612                 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
8613
8614                 if (type == BPF_WRITE)
8615                         *insn++ = BPF_STX_MEM(BPF_DW,
8616                                               si->dst_reg, si->src_reg,
8617                                               bpf_target_off(struct sk_buff,
8618                                                              tstamp, 8,
8619                                                              target_size));
8620                 else
8621                         *insn++ = BPF_LDX_MEM(BPF_DW,
8622                                               si->dst_reg, si->src_reg,
8623                                               bpf_target_off(struct sk_buff,
8624                                                              tstamp, 8,
8625                                                              target_size));
8626                 break;
8627
8628         case offsetof(struct __sk_buff, gso_segs):
8629                 insn = bpf_convert_shinfo_access(si, insn);
8630                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
8631                                       si->dst_reg, si->dst_reg,
8632                                       bpf_target_off(struct skb_shared_info,
8633                                                      gso_segs, 2,
8634                                                      target_size));
8635                 break;
8636         case offsetof(struct __sk_buff, gso_size):
8637                 insn = bpf_convert_shinfo_access(si, insn);
8638                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
8639                                       si->dst_reg, si->dst_reg,
8640                                       bpf_target_off(struct skb_shared_info,
8641                                                      gso_size, 2,
8642                                                      target_size));
8643                 break;
8644         case offsetof(struct __sk_buff, wire_len):
8645                 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
8646
8647                 off = si->off;
8648                 off -= offsetof(struct __sk_buff, wire_len);
8649                 off += offsetof(struct sk_buff, cb);
8650                 off += offsetof(struct qdisc_skb_cb, pkt_len);
8651                 *target_size = 4;
8652                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
8653                 break;
8654
8655         case offsetof(struct __sk_buff, sk):
8656                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8657                                       si->dst_reg, si->src_reg,
8658                                       offsetof(struct sk_buff, sk));
8659                 break;
8660         }
8661
8662         return insn - insn_buf;
8663 }
8664
8665 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
8666                                 const struct bpf_insn *si,
8667                                 struct bpf_insn *insn_buf,
8668                                 struct bpf_prog *prog, u32 *target_size)
8669 {
8670         struct bpf_insn *insn = insn_buf;
8671         int off;
8672
8673         switch (si->off) {
8674         case offsetof(struct bpf_sock, bound_dev_if):
8675                 BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);
8676
8677                 if (type == BPF_WRITE)
8678                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8679                                         offsetof(struct sock, sk_bound_dev_if));
8680                 else
8681                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8682                                       offsetof(struct sock, sk_bound_dev_if));
8683                 break;
8684
8685         case offsetof(struct bpf_sock, mark):
8686                 BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);
8687
8688                 if (type == BPF_WRITE)
8689                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8690                                         offsetof(struct sock, sk_mark));
8691                 else
8692                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8693                                       offsetof(struct sock, sk_mark));
8694                 break;
8695
8696         case offsetof(struct bpf_sock, priority):
8697                 BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);
8698
8699                 if (type == BPF_WRITE)
8700                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8701                                         offsetof(struct sock, sk_priority));
8702                 else
8703                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8704                                       offsetof(struct sock, sk_priority));
8705                 break;
8706
8707         case offsetof(struct bpf_sock, family):
8708                 *insn++ = BPF_LDX_MEM(
8709                         BPF_FIELD_SIZEOF(struct sock_common, skc_family),
8710                         si->dst_reg, si->src_reg,
8711                         bpf_target_off(struct sock_common,
8712                                        skc_family,
8713                                        sizeof_field(struct sock_common,
8714                                                     skc_family),
8715                                        target_size));
8716                 break;
8717
8718         case offsetof(struct bpf_sock, type):
8719                 *insn++ = BPF_LDX_MEM(
8720                         BPF_FIELD_SIZEOF(struct sock, sk_type),
8721                         si->dst_reg, si->src_reg,
8722                         bpf_target_off(struct sock, sk_type,
8723                                        sizeof_field(struct sock, sk_type),
8724                                        target_size));
8725                 break;
8726
8727         case offsetof(struct bpf_sock, protocol):
8728                 *insn++ = BPF_LDX_MEM(
8729                         BPF_FIELD_SIZEOF(struct sock, sk_protocol),
8730                         si->dst_reg, si->src_reg,
8731                         bpf_target_off(struct sock, sk_protocol,
8732                                        sizeof_field(struct sock, sk_protocol),
8733                                        target_size));
8734                 break;
8735
8736         case offsetof(struct bpf_sock, src_ip4):
8737                 *insn++ = BPF_LDX_MEM(
8738                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8739                         bpf_target_off(struct sock_common, skc_rcv_saddr,
8740                                        sizeof_field(struct sock_common,
8741                                                     skc_rcv_saddr),
8742                                        target_size));
8743                 break;
8744
8745         case offsetof(struct bpf_sock, dst_ip4):
8746                 *insn++ = BPF_LDX_MEM(
8747                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8748                         bpf_target_off(struct sock_common, skc_daddr,
8749                                        sizeof_field(struct sock_common,
8750                                                     skc_daddr),
8751                                        target_size));
8752                 break;
8753
8754         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
8755 #if IS_ENABLED(CONFIG_IPV6)
8756                 off = si->off;
8757                 off -= offsetof(struct bpf_sock, src_ip6[0]);
8758                 *insn++ = BPF_LDX_MEM(
8759                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8760                         bpf_target_off(
8761                                 struct sock_common,
8762                                 skc_v6_rcv_saddr.s6_addr32[0],
8763                                 sizeof_field(struct sock_common,
8764                                              skc_v6_rcv_saddr.s6_addr32[0]),
8765                                 target_size) + off);
8766 #else
8767                 (void)off;
8768                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8769 #endif
8770                 break;
8771
8772         case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
8773 #if IS_ENABLED(CONFIG_IPV6)
8774                 off = si->off;
8775                 off -= offsetof(struct bpf_sock, dst_ip6[0]);
8776                 *insn++ = BPF_LDX_MEM(
8777                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
8778                         bpf_target_off(struct sock_common,
8779                                        skc_v6_daddr.s6_addr32[0],
8780                                        sizeof_field(struct sock_common,
8781                                                     skc_v6_daddr.s6_addr32[0]),
8782                                        target_size) + off);
8783 #else
8784                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8785                 *target_size = 4;
8786 #endif
8787                 break;
8788
8789         case offsetof(struct bpf_sock, src_port):
8790                 *insn++ = BPF_LDX_MEM(
8791                         BPF_FIELD_SIZEOF(struct sock_common, skc_num),
8792                         si->dst_reg, si->src_reg,
8793                         bpf_target_off(struct sock_common, skc_num,
8794                                        sizeof_field(struct sock_common,
8795                                                     skc_num),
8796                                        target_size));
8797                 break;
8798
8799         case offsetof(struct bpf_sock, dst_port):
8800                 *insn++ = BPF_LDX_MEM(
8801                         BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
8802                         si->dst_reg, si->src_reg,
8803                         bpf_target_off(struct sock_common, skc_dport,
8804                                        sizeof_field(struct sock_common,
8805                                                     skc_dport),
8806                                        target_size));
8807                 break;
8808
8809         case offsetof(struct bpf_sock, state):
8810                 *insn++ = BPF_LDX_MEM(
8811                         BPF_FIELD_SIZEOF(struct sock_common, skc_state),
8812                         si->dst_reg, si->src_reg,
8813                         bpf_target_off(struct sock_common, skc_state,
8814                                        sizeof_field(struct sock_common,
8815                                                     skc_state),
8816                                        target_size));
8817                 break;
8818         case offsetof(struct bpf_sock, rx_queue_mapping):
8819 #ifdef CONFIG_XPS
8820                 *insn++ = BPF_LDX_MEM(
8821                         BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping),
8822                         si->dst_reg, si->src_reg,
8823                         bpf_target_off(struct sock, sk_rx_queue_mapping,
8824                                        sizeof_field(struct sock,
8825                                                     sk_rx_queue_mapping),
8826                                        target_size));
8827                 *insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING,
8828                                       1);
8829                 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
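                /* Same sentinel-hiding pattern as napi_id: if the loaded
                 * value is the internal NO_QUEUE_MAPPING marker, the BPF_JNE
                 * falls through and the program sees -1 instead.
                 */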
8830 #else
8831                 *insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
8832                 *target_size = 2;
8833 #endif
8834                 break;
8835         }
8836
8837         return insn - insn_buf;
8838 }
8839
8840 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
8841                                          const struct bpf_insn *si,
8842                                          struct bpf_insn *insn_buf,
8843                                          struct bpf_prog *prog, u32 *target_size)
8844 {
8845         struct bpf_insn *insn = insn_buf;
8846
8847         switch (si->off) {
8848         case offsetof(struct __sk_buff, ifindex):
8849                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
8850                                       si->dst_reg, si->src_reg,
8851                                       offsetof(struct sk_buff, dev));
8852                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8853                                       bpf_target_off(struct net_device, ifindex, 4,
8854                                                      target_size));
8855                 break;
8856         default:
8857                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
8858                                               target_size);
8859         }
8860
8861         return insn - insn_buf;
8862 }
8863
8864 static u32 xdp_convert_ctx_access(enum bpf_access_type type,
8865                                   const struct bpf_insn *si,
8866                                   struct bpf_insn *insn_buf,
8867                                   struct bpf_prog *prog, u32 *target_size)
8868 {
8869         struct bpf_insn *insn = insn_buf;
8870
8871         switch (si->off) {
8872         case offsetof(struct xdp_md, data):
8873                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
8874                                       si->dst_reg, si->src_reg,
8875                                       offsetof(struct xdp_buff, data));
8876                 break;
8877         case offsetof(struct xdp_md, data_meta):
8878                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
8879                                       si->dst_reg, si->src_reg,
8880                                       offsetof(struct xdp_buff, data_meta));
8881                 break;
8882         case offsetof(struct xdp_md, data_end):
8883                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
8884                                       si->dst_reg, si->src_reg,
8885                                       offsetof(struct xdp_buff, data_end));
8886                 break;
8887         case offsetof(struct xdp_md, ingress_ifindex):
8888                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
8889                                       si->dst_reg, si->src_reg,
8890                                       offsetof(struct xdp_buff, rxq));
8891                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
8892                                       si->dst_reg, si->dst_reg,
8893                                       offsetof(struct xdp_rxq_info, dev));
8894                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8895                                       offsetof(struct net_device, ifindex));
8896                 break;
8897         case offsetof(struct xdp_md, rx_queue_index):
8898                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
8899                                       si->dst_reg, si->src_reg,
8900                                       offsetof(struct xdp_buff, rxq));
8901                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8902                                       offsetof(struct xdp_rxq_info,
8903                                                queue_index));
8904                 break;
8905         case offsetof(struct xdp_md, egress_ifindex):
8906                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
8907                                       si->dst_reg, si->src_reg,
8908                                       offsetof(struct xdp_buff, txq));
8909                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
8910                                       si->dst_reg, si->dst_reg,
8911                                       offsetof(struct xdp_txq_info, dev));
8912                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8913                                       offsetof(struct net_device, ifindex));
8914                 break;
8915         }
8916
8917         return insn - insn_buf;
8918 }
8919
8920 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads the Nested Field S.F.NF, where S is
8921  * the type of the context Structure, F is the Field in the context structure
8922  * that contains a pointer to the Nested Structure of type NS with field NF.
8923  *
8924  * SIZE encodes the load size (BPF_B, BPF_H, etc.). It's up to the caller to
8925  * make sure that SIZE is not greater than the actual size of S.F.NF.
8926  *
8927  * If an offset OFF is provided, the load happens at that offset relative to
8928  * the offset of NF.
8929  */
8930 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)          \
8931         do {                                                                   \
8932                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,     \
8933                                       si->src_reg, offsetof(S, F));            \
8934                 *insn++ = BPF_LDX_MEM(                                         \
8935                         SIZE, si->dst_reg, si->dst_reg,                        \
8936                         bpf_target_off(NS, NF, sizeof_field(NS, NF),           \
8937                                        target_size)                            \
8938                                 + OFF);                                        \
8939         } while (0)
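
/* Sketch of what the macro above emits, using the user_family case below for
 * concreteness (S = struct bpf_sock_addr_kern, F = uaddr, NS = struct
 * sockaddr, NF = sa_family):
 *
 *   dst_reg = *(void **)(src_reg + offsetof(S, uaddr));    // follow F
 *   dst_reg = *(u16 *)(dst_reg + offsetof(NS, sa_family)); // fetch NF
 *
 * i.e. one load to chase the pointer, one to fetch the nested field.
 */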
8940
8941 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)                              \
8942         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,                     \
8943                                              BPF_FIELD_SIZEOF(NS, NF), 0)
8944
8945 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
8946  * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
8947  *
8948  * In addition it uses the Temporary Field TF (a member of struct S) as a 3rd
8949  * "register", since the two registers available in convert_ctx_access are not
8950  * enough: we can't overwrite SRC, since it contains the value to store, nor
8951  * DST, since it contains the pointer to the context that may be used by
8952  * later instructions. But we need a temporary place to save the pointer to
8953  * the nested structure whose field we want to store to.
8954  */
8955 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF)          \
8956         do {                                                                   \
8957                 int tmp_reg = BPF_REG_9;                                       \
8958                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
8959                         --tmp_reg;                                             \
8960                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
8961                         --tmp_reg;                                             \
8962                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,            \
8963                                       offsetof(S, TF));                        \
8964                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,         \
8965                                       si->dst_reg, offsetof(S, F));            \
8966                 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,              \
8967                         bpf_target_off(NS, NF, sizeof_field(NS, NF),           \
8968                                        target_size)                            \
8969                                 + OFF);                                        \
8970                 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,            \
8971                                       offsetof(S, TF));                        \
8972         } while (0)
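
/* Sketch of the sequence the store macro emits (tmp_reg starts at R9 and is
 * stepped down past src_reg/dst_reg if either already uses it):
 *
 *   *(u64 *)(dst_reg + offsetof(S, TF)) = tmp_reg;     // spill scratch reg
 *   tmp_reg = *(void **)(dst_reg + offsetof(S, F));    // nested struct ptr
 *   *(SIZE *)(tmp_reg + off(NS, NF) + OFF) = src_reg;  // the actual store
 *   tmp_reg = *(u64 *)(dst_reg + offsetof(S, TF));     // restore scratch reg
 */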
8973
8974 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
8975                                                       TF)                      \
8976         do {                                                                   \
8977                 if (type == BPF_WRITE) {                                       \
8978                         SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE,   \
8979                                                          OFF, TF);             \
8980                 } else {                                                       \
8981                         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(                  \
8982                                 S, NS, F, NF, SIZE, OFF);  \
8983                 }                                                              \
8984         } while (0)
8985
8986 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)                 \
8987         SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(                         \
8988                 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
8989
8990 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
8991                                         const struct bpf_insn *si,
8992                                         struct bpf_insn *insn_buf,
8993                                         struct bpf_prog *prog, u32 *target_size)
8994 {
8995         int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
8996         struct bpf_insn *insn = insn_buf;
8997
8998         switch (si->off) {
8999         case offsetof(struct bpf_sock_addr, user_family):
9000                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
9001                                             struct sockaddr, uaddr, sa_family);
9002                 break;
9003
9004         case offsetof(struct bpf_sock_addr, user_ip4):
9005                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
9006                         struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
9007                         sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
9008                 break;
9009
9010         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
9011                 off = si->off;
9012                 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
9013                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
9014                         struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
9015                         sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
9016                         tmp_reg);
9017                 break;
9018
9019         case offsetof(struct bpf_sock_addr, user_port):
9020                 /* To get the port we would need to know sa_family first
9021                  * and then treat sockaddr as either sockaddr_in or
9022                  * sockaddr_in6. We can simplify, though, since the port
9023                  * field has the same offset and size in both structures.
9024                  * Here we check this invariant at build time and then
9025                  * use just one of the structures.
9026                  */
9027                 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
9028                              offsetof(struct sockaddr_in6, sin6_port));
9029                 BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
9030                              sizeof_field(struct sockaddr_in6, sin6_port));
9031                 /* Account for sin6_port being smaller than user_port. */
9032                 port_size = min(port_size, BPF_LDST_BYTES(si));
9033                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
9034                         struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
9035                         sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
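                /* E.g. a 4-byte read of user_port is still emitted as a
                 * BPF_H load here: port_size is clamped to the 2 bytes that
                 * sin6_port actually occupies, and the zero-extending load
                 * yields the same value a full-width read would.
                 */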
9036                 break;
9037
9038         case offsetof(struct bpf_sock_addr, family):
9039                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
9040                                             struct sock, sk, sk_family);
9041                 break;
9042
9043         case offsetof(struct bpf_sock_addr, type):
9044                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
9045                                             struct sock, sk, sk_type);
9046                 break;
9047
9048         case offsetof(struct bpf_sock_addr, protocol):
9049                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
9050                                             struct sock, sk, sk_protocol);
9051                 break;
9052
9053         case offsetof(struct bpf_sock_addr, msg_src_ip4):
9054                 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
9055                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
9056                         struct bpf_sock_addr_kern, struct in_addr, t_ctx,
9057                         s_addr, BPF_SIZE(si->code), 0, tmp_reg);
9058                 break;
9059
9060         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
9061                                 msg_src_ip6[3]):
9062                 off = si->off;
9063                 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
9064                 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
9065                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
9066                         struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
9067                         s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
9068                 break;
9069         case offsetof(struct bpf_sock_addr, sk):
9070                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
9071                                       si->dst_reg, si->src_reg,
9072                                       offsetof(struct bpf_sock_addr_kern, sk));
9073                 break;
9074         }
9075
9076         return insn - insn_buf;
9077 }
9078
9079 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
9080                                        const struct bpf_insn *si,
9081                                        struct bpf_insn *insn_buf,
9082                                        struct bpf_prog *prog,
9083                                        u32 *target_size)
9084 {
9085         struct bpf_insn *insn = insn_buf;
9086         int off;
9087
9088 /* Helper macro for adding read access to tcp_sock or sock fields. */
9089 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
9090         do {                                                                  \
9091                 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
9092                 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
9093                              sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
9094                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9095                         reg--;                                                \
9096                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9097                         reg--;                                                \
9098                 if (si->dst_reg == si->src_reg) {                             \
9099                         *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
9100                                           offsetof(struct bpf_sock_ops_kern,  \
9101                                           temp));                             \
9102                         fullsock_reg = reg;                                   \
9103                         jmp += 2;                                             \
9104                 }                                                             \
9105                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9106                                                 struct bpf_sock_ops_kern,     \
9107                                                 is_fullsock),                 \
9108                                       fullsock_reg, si->src_reg,              \
9109                                       offsetof(struct bpf_sock_ops_kern,      \
9110                                                is_fullsock));                 \
9111                 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
9112                 if (si->dst_reg == si->src_reg)                               \
9113                         *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
9114                                       offsetof(struct bpf_sock_ops_kern,      \
9115                                       temp));                                 \
9116                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9117                                                 struct bpf_sock_ops_kern, sk),\
9118                                       si->dst_reg, si->src_reg,               \
9119                                       offsetof(struct bpf_sock_ops_kern, sk));\
9120                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,                   \
9121                                                        OBJ_FIELD),            \
9122                                       si->dst_reg, si->dst_reg,               \
9123                                       offsetof(OBJ, OBJ_FIELD));              \
9124                 if (si->dst_reg == si->src_reg) {                             \
9125                         *insn++ = BPF_JMP_A(1);                               \
9126                         *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
9127                                       offsetof(struct bpf_sock_ops_kern,      \
9128                                       temp));                                 \
9129                 }                                                             \
9130         } while (0)
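
/* When dst_reg == src_reg the macro above has no free register for the
 * is_fullsock test, so it spills a scratch register (R9, stepped down past
 * any conflict) into bpf_sock_ops_kern::temp, runs the test in it, and
 * reloads it on both the fullsock and non-fullsock paths; jmp is bumped by 2
 * so the branch also clears the extra reload and the BPF_JMP_A.
 */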
9131
9132 #define SOCK_OPS_GET_SK()                                                             \
9133         do {                                                                  \
9134                 int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
9135                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9136                         reg--;                                                \
9137                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9138                         reg--;                                                \
9139                 if (si->dst_reg == si->src_reg) {                             \
9140                         *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
9141                                           offsetof(struct bpf_sock_ops_kern,  \
9142                                           temp));                             \
9143                         fullsock_reg = reg;                                   \
9144                         jmp += 2;                                             \
9145                 }                                                             \
9146                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9147                                                 struct bpf_sock_ops_kern,     \
9148                                                 is_fullsock),                 \
9149                                       fullsock_reg, si->src_reg,              \
9150                                       offsetof(struct bpf_sock_ops_kern,      \
9151                                                is_fullsock));                 \
9152                 *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
9153                 if (si->dst_reg == si->src_reg)                               \
9154                         *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
9155                                       offsetof(struct bpf_sock_ops_kern,      \
9156                                       temp));                                 \
9157                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9158                                                 struct bpf_sock_ops_kern, sk),\
9159                                       si->dst_reg, si->src_reg,               \
9160                                       offsetof(struct bpf_sock_ops_kern, sk));\
9161                 if (si->dst_reg == si->src_reg) {                             \
9162                         *insn++ = BPF_JMP_A(1);                               \
9163                         *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
9164                                       offsetof(struct bpf_sock_ops_kern,      \
9165                                       temp));                                 \
9166                 }                                                             \
9167         } while (0)
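
/* SOCK_OPS_GET_SK() is the same spill/test/restore dance as
 * SOCK_OPS_GET_FIELD(), minus the final field dereference: the sk pointer
 * itself is the result, so jmp starts at 1 (one fewer instruction to skip).
 */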
9168
9169 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
9170                 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
9171
9172 /* Helper macro for adding write access to tcp_sock or sock fields.
9173  * The macro is called with two registers, dst_reg which contains a pointer
9174  * to ctx (context) and src_reg which contains the value that should be
9175  * stored. However, we need an additional register since we cannot overwrite
9176  * dst_reg because it may be used later in the program.
9177  * Instead we "borrow" one of the other registers. We first save its value
9178  * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
9179  * it at the end of the macro.
9180  */
9181 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
9182         do {                                                                  \
9183                 int reg = BPF_REG_9;                                          \
9184                 BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
9185                              sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
9186                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9187                         reg--;                                                \
9188                 if (si->dst_reg == reg || si->src_reg == reg)                 \
9189                         reg--;                                                \
9190                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,               \
9191                                       offsetof(struct bpf_sock_ops_kern,      \
9192                                                temp));                        \
9193                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9194                                                 struct bpf_sock_ops_kern,     \
9195                                                 is_fullsock),                 \
9196                                       reg, si->dst_reg,                       \
9197                                       offsetof(struct bpf_sock_ops_kern,      \
9198                                                is_fullsock));                 \
9199                 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
9200                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
9201                                                 struct bpf_sock_ops_kern, sk),\
9202                                       reg, si->dst_reg,                       \
9203                                       offsetof(struct bpf_sock_ops_kern, sk));\
9204                 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),       \
9205                                       reg, si->src_reg,                       \
9206                                       offsetof(OBJ, OBJ_FIELD));              \
9207                 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,               \
9208                                       offsetof(struct bpf_sock_ops_kern,      \
9209                                                temp));                        \
9210         } while (0)
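
/* Sketch of what the write macro emits, with reg a scratch register distinct
 * from src_reg and dst_reg:
 *
 *   *(u64 *)(dst_reg + temp) = reg;          // spill scratch reg
 *   reg = kern->is_fullsock;                 // loaded via dst_reg
 *   if (reg == 0) goto restore;              // no store on non-fullsock
 *   reg = kern->sk;
 *   *(size *)(reg + offsetof(OBJ, OBJ_FIELD)) = src_reg;
 * restore:
 *   reg = *(u64 *)(dst_reg + temp);          // restore scratch reg
 */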
9211
9212 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)            \
9213         do {                                                                  \
9214                 if (TYPE == BPF_WRITE)                                        \
9215                         SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
9216                 else                                                          \
9217                         SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
9218         } while (0)
9219
9220         if (insn > insn_buf)
9221                 return insn - insn_buf;
9222
9223         switch (si->off) {
9224         case offsetof(struct bpf_sock_ops, op):
9225                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9226                                                        op),
9227                                       si->dst_reg, si->src_reg,
9228                                       offsetof(struct bpf_sock_ops_kern, op));
9229                 break;
9230
9231         case offsetof(struct bpf_sock_ops, replylong[0]) ...
9232              offsetof(struct bpf_sock_ops, replylong[3]):
9233                 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
9234                              sizeof_field(struct bpf_sock_ops_kern, reply));
9235                 BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
9236                              sizeof_field(struct bpf_sock_ops_kern, replylong));
9237                 off = si->off;
9238                 off -= offsetof(struct bpf_sock_ops, replylong[0]);
9239                 off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
9240                 if (type == BPF_WRITE)
9241                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
9242                                               off);
9243                 else
9244                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
9245                                               off);
9246                 break;
9247
9248         case offsetof(struct bpf_sock_ops, family):
9249                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
9250
9251                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9252                                               struct bpf_sock_ops_kern, sk),
9253                                       si->dst_reg, si->src_reg,
9254                                       offsetof(struct bpf_sock_ops_kern, sk));
9255                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9256                                       offsetof(struct sock_common, skc_family));
9257                 break;
9258
9259         case offsetof(struct bpf_sock_ops, remote_ip4):
9260                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
9261
9262                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9263                                                 struct bpf_sock_ops_kern, sk),
9264                                       si->dst_reg, si->src_reg,
9265                                       offsetof(struct bpf_sock_ops_kern, sk));
9266                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9267                                       offsetof(struct sock_common, skc_daddr));
9268                 break;
9269
9270         case offsetof(struct bpf_sock_ops, local_ip4):
9271                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9272                                           skc_rcv_saddr) != 4);
9273
9274                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9275                                               struct bpf_sock_ops_kern, sk),
9276                                       si->dst_reg, si->src_reg,
9277                                       offsetof(struct bpf_sock_ops_kern, sk));
9278                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9279                                       offsetof(struct sock_common,
9280                                                skc_rcv_saddr));
9281                 break;
9282
9283         case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
9284              offsetof(struct bpf_sock_ops, remote_ip6[3]):
9285 #if IS_ENABLED(CONFIG_IPV6)
9286                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9287                                           skc_v6_daddr.s6_addr32[0]) != 4);
9288
9289                 off = si->off;
9290                 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
9291                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9292                                                 struct bpf_sock_ops_kern, sk),
9293                                       si->dst_reg, si->src_reg,
9294                                       offsetof(struct bpf_sock_ops_kern, sk));
9295                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9296                                       offsetof(struct sock_common,
9297                                                skc_v6_daddr.s6_addr32[0]) +
9298                                       off);
9299 #else
9300                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9301 #endif
9302                 break;
9303
9304         case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
9305              offsetof(struct bpf_sock_ops, local_ip6[3]):
9306 #if IS_ENABLED(CONFIG_IPV6)
9307                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9308                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
9309
9310                 off = si->off;
9311                 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
9312                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9313                                                 struct bpf_sock_ops_kern, sk),
9314                                       si->dst_reg, si->src_reg,
9315                                       offsetof(struct bpf_sock_ops_kern, sk));
9316                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9317                                       offsetof(struct sock_common,
9318                                                skc_v6_rcv_saddr.s6_addr32[0]) +
9319                                       off);
9320 #else
9321                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9322 #endif
9323                 break;
9324
9325         case offsetof(struct bpf_sock_ops, remote_port):
9326                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
9327
9328                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9329                                                 struct bpf_sock_ops_kern, sk),
9330                                       si->dst_reg, si->src_reg,
9331                                       offsetof(struct bpf_sock_ops_kern, sk));
9332                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9333                                       offsetof(struct sock_common, skc_dport));
9334 #ifndef __BIG_ENDIAN_BITFIELD
9335                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
9336 #endif
9337                 break;
9338
9339         case offsetof(struct bpf_sock_ops, local_port):
9340                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
9341
9342                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9343                                                 struct bpf_sock_ops_kern, sk),
9344                                       si->dst_reg, si->src_reg,
9345                                       offsetof(struct bpf_sock_ops_kern, sk));
9346                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9347                                       offsetof(struct sock_common, skc_num));
9348                 break;
9349
9350         case offsetof(struct bpf_sock_ops, is_fullsock):
9351                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9352                                                 struct bpf_sock_ops_kern,
9353                                                 is_fullsock),
9354                                       si->dst_reg, si->src_reg,
9355                                       offsetof(struct bpf_sock_ops_kern,
9356                                                is_fullsock));
9357                 break;
9358
9359         case offsetof(struct bpf_sock_ops, state):
9360                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);
9361
9362                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9363                                                 struct bpf_sock_ops_kern, sk),
9364                                       si->dst_reg, si->src_reg,
9365                                       offsetof(struct bpf_sock_ops_kern, sk));
9366                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
9367                                       offsetof(struct sock_common, skc_state));
9368                 break;
9369
9370         case offsetof(struct bpf_sock_ops, rtt_min):
9371                 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
9372                              sizeof(struct minmax));
9373                 BUILD_BUG_ON(sizeof(struct minmax) <
9374                              sizeof(struct minmax_sample));
9375
9376                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9377                                                 struct bpf_sock_ops_kern, sk),
9378                                       si->dst_reg, si->src_reg,
9379                                       offsetof(struct bpf_sock_ops_kern, sk));
9380                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9381                                       offsetof(struct tcp_sock, rtt_min) +
9382                                       sizeof_field(struct minmax_sample, t));
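                /* struct minmax stores (time, value) samples; skipping the
                 * leading time member lands the load on s[0].v, the current
                 * windowed minimum RTT.
                 */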
9383                 break;
9384
9385         case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
9386                 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
9387                                    struct tcp_sock);
9388                 break;
9389
9390         case offsetof(struct bpf_sock_ops, sk_txhash):
9391                 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
9392                                           struct sock, type);
9393                 break;
9394         case offsetof(struct bpf_sock_ops, snd_cwnd):
9395                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
9396                 break;
9397         case offsetof(struct bpf_sock_ops, srtt_us):
9398                 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
9399                 break;
9400         case offsetof(struct bpf_sock_ops, snd_ssthresh):
9401                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
9402                 break;
9403         case offsetof(struct bpf_sock_ops, rcv_nxt):
9404                 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
9405                 break;
9406         case offsetof(struct bpf_sock_ops, snd_nxt):
9407                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
9408                 break;
9409         case offsetof(struct bpf_sock_ops, snd_una):
9410                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
9411                 break;
9412         case offsetof(struct bpf_sock_ops, mss_cache):
9413                 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
9414                 break;
9415         case offsetof(struct bpf_sock_ops, ecn_flags):
9416                 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
9417                 break;
9418         case offsetof(struct bpf_sock_ops, rate_delivered):
9419                 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
9420                 break;
9421         case offsetof(struct bpf_sock_ops, rate_interval_us):
9422                 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
9423                 break;
9424         case offsetof(struct bpf_sock_ops, packets_out):
9425                 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
9426                 break;
9427         case offsetof(struct bpf_sock_ops, retrans_out):
9428                 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
9429                 break;
9430         case offsetof(struct bpf_sock_ops, total_retrans):
9431                 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
9432                 break;
9433         case offsetof(struct bpf_sock_ops, segs_in):
9434                 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
9435                 break;
9436         case offsetof(struct bpf_sock_ops, data_segs_in):
9437                 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
9438                 break;
9439         case offsetof(struct bpf_sock_ops, segs_out):
9440                 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
9441                 break;
9442         case offsetof(struct bpf_sock_ops, data_segs_out):
9443                 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
9444                 break;
9445         case offsetof(struct bpf_sock_ops, lost_out):
9446                 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
9447                 break;
9448         case offsetof(struct bpf_sock_ops, sacked_out):
9449                 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
9450                 break;
9451         case offsetof(struct bpf_sock_ops, bytes_received):
9452                 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
9453                 break;
9454         case offsetof(struct bpf_sock_ops, bytes_acked):
9455                 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
9456                 break;
9457         case offsetof(struct bpf_sock_ops, sk):
9458                 SOCK_OPS_GET_SK();
9459                 break;
9460         case offsetof(struct bpf_sock_ops, skb_data_end):
9461                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9462                                                        skb_data_end),
9463                                       si->dst_reg, si->src_reg,
9464                                       offsetof(struct bpf_sock_ops_kern,
9465                                                skb_data_end));
9466                 break;
9467         case offsetof(struct bpf_sock_ops, skb_data):
9468                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9469                                                        skb),
9470                                       si->dst_reg, si->src_reg,
9471                                       offsetof(struct bpf_sock_ops_kern,
9472                                                skb));
9473                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
9474                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
9475                                       si->dst_reg, si->dst_reg,
9476                                       offsetof(struct sk_buff, data));
9477                 break;
9478         case offsetof(struct bpf_sock_ops, skb_len):
9479                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9480                                                        skb),
9481                                       si->dst_reg, si->src_reg,
9482                                       offsetof(struct bpf_sock_ops_kern,
9483                                                skb));
9484                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
9485                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
9486                                       si->dst_reg, si->dst_reg,
9487                                       offsetof(struct sk_buff, len));
9488                 break;
9489         case offsetof(struct bpf_sock_ops, skb_tcp_flags):
9490                 off = offsetof(struct sk_buff, cb);
9491                 off += offsetof(struct tcp_skb_cb, tcp_flags);
9492                 *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags);
9493                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
9494                                                        skb),
9495                                       si->dst_reg, si->src_reg,
9496                                       offsetof(struct bpf_sock_ops_kern,
9497                                                skb));
9498                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
9499                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb,
9500                                                        tcp_flags),
9501                                       si->dst_reg, si->dst_reg, off);
9502                 break;
9503         }
9504         return insn - insn_buf;
9505 }
9506
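/*
 * Illustrative sketch (not part of this file): the ctx rewrites above are
 * what let a BPF_PROG_TYPE_SOCK_OPS program read TCP state straight from
 * struct bpf_sock_ops. Assuming libbpf's SEC() and bpf_printk() helpers,
 * such a program might look like:
 *
 *	SEC("sockops")
 *	int log_cwnd(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_printk("snd_cwnd=%u srtt_us=%u",
 *				   skops->snd_cwnd, skops->srtt_us);
 *		return 1;
 *	}
 *
 * Each field access is rewritten by sock_ops_convert_ctx_access() into
 * direct loads from tcp_sock/sock, so no helper call is involved.
 */
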
9507 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
9508                                      const struct bpf_insn *si,
9509                                      struct bpf_insn *insn_buf,
9510                                      struct bpf_prog *prog, u32 *target_size)
9511 {
9512         struct bpf_insn *insn = insn_buf;
9513         int off;
9514
9515         switch (si->off) {
9516         case offsetof(struct __sk_buff, data_end):
9517                 off  = si->off;
9518                 off -= offsetof(struct __sk_buff, data_end);
9519                 off += offsetof(struct sk_buff, cb);
9520                 off += offsetof(struct tcp_skb_cb, bpf.data_end);
9521                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
9522                                       si->src_reg, off);
9523                 break;
9524         default:
9525                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
9526                                               target_size);
9527         }
9528
9529         return insn - insn_buf;
9530 }
9531
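/*
 * Illustrative sketch (not part of this file): an SK_SKB program observes
 * the data_end value loaded above through __sk_buff. Assuming libbpf
 * conventions, a minimal verdict program doing a bounds check might be:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		void *data = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *
 *		if (data + 1 > data_end)
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */
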
9532 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
9533                                      const struct bpf_insn *si,
9534                                      struct bpf_insn *insn_buf,
9535                                      struct bpf_prog *prog, u32 *target_size)
9536 {
9537         struct bpf_insn *insn = insn_buf;
9538 #if IS_ENABLED(CONFIG_IPV6)
9539         int off;
9540 #endif
9541
9542         /* The ctx conversion relies on the sg element being first in struct sk_msg. */
9543         BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
9544
9545         switch (si->off) {
9546         case offsetof(struct sk_msg_md, data):
9547                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
9548                                       si->dst_reg, si->src_reg,
9549                                       offsetof(struct sk_msg, data));
9550                 break;
9551         case offsetof(struct sk_msg_md, data_end):
9552                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
9553                                       si->dst_reg, si->src_reg,
9554                                       offsetof(struct sk_msg, data_end));
9555                 break;
9556         case offsetof(struct sk_msg_md, family):
9557                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);
9558
9559                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9560                                               struct sk_msg, sk),
9561                                       si->dst_reg, si->src_reg,
9562                                       offsetof(struct sk_msg, sk));
9563                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9564                                       offsetof(struct sock_common, skc_family));
9565                 break;
9566
9567         case offsetof(struct sk_msg_md, remote_ip4):
9568                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);
9569
9570                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9571                                                 struct sk_msg, sk),
9572                                       si->dst_reg, si->src_reg,
9573                                       offsetof(struct sk_msg, sk));
9574                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9575                                       offsetof(struct sock_common, skc_daddr));
9576                 break;
9577
9578         case offsetof(struct sk_msg_md, local_ip4):
9579                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9580                                           skc_rcv_saddr) != 4);
9581
9582                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9583                                               struct sk_msg, sk),
9584                                       si->dst_reg, si->src_reg,
9585                                       offsetof(struct sk_msg, sk));
9586                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9587                                       offsetof(struct sock_common,
9588                                                skc_rcv_saddr));
9589                 break;
9590
9591         case offsetof(struct sk_msg_md, remote_ip6[0]) ...
9592              offsetof(struct sk_msg_md, remote_ip6[3]):
9593 #if IS_ENABLED(CONFIG_IPV6)
9594                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9595                                           skc_v6_daddr.s6_addr32[0]) != 4);
9596
9597                 off = si->off;
9598                 off -= offsetof(struct sk_msg_md, remote_ip6[0]);
9599                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9600                                                 struct sk_msg, sk),
9601                                       si->dst_reg, si->src_reg,
9602                                       offsetof(struct sk_msg, sk));
9603                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9604                                       offsetof(struct sock_common,
9605                                                skc_v6_daddr.s6_addr32[0]) +
9606                                       off);
9607 #else
9608                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9609 #endif
9610                 break;
9611
9612         case offsetof(struct sk_msg_md, local_ip6[0]) ...
9613              offsetof(struct sk_msg_md, local_ip6[3]):
9614 #if IS_ENABLED(CONFIG_IPV6)
9615                 BUILD_BUG_ON(sizeof_field(struct sock_common,
9616                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
9617
9618                 off = si->off;
9619                 off -= offsetof(struct sk_msg_md, local_ip6[0]);
9620                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9621                                                 struct sk_msg, sk),
9622                                       si->dst_reg, si->src_reg,
9623                                       offsetof(struct sk_msg, sk));
9624                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
9625                                       offsetof(struct sock_common,
9626                                                skc_v6_rcv_saddr.s6_addr32[0]) +
9627                                       off);
9628 #else
9629                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
9630 #endif
9631                 break;
9632
9633         case offsetof(struct sk_msg_md, remote_port):
9634                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);
9635
9636                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9637                                                 struct sk_msg, sk),
9638                                       si->dst_reg, si->src_reg,
9639                                       offsetof(struct sk_msg, sk));
9640                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9641                                       offsetof(struct sock_common, skc_dport));
9642 #ifndef __BIG_ENDIAN_BITFIELD
9643                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
9644 #endif
9645                 break;
9646
9647         case offsetof(struct sk_msg_md, local_port):
9648                 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
9649
9650                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
9651                                                 struct sk_msg, sk),
9652                                       si->dst_reg, si->src_reg,
9653                                       offsetof(struct sk_msg, sk));
9654                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
9655                                       offsetof(struct sock_common, skc_num));
9656                 break;
9657
9658         case offsetof(struct sk_msg_md, size):
9659                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
9660                                       si->dst_reg, si->src_reg,
9661                                       offsetof(struct sk_msg_sg, size));
9662                 break;
9663
9664         case offsetof(struct sk_msg_md, sk):
9665                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk),
9666                                       si->dst_reg, si->src_reg,
9667                                       offsetof(struct sk_msg, sk));
9668                 break;
9669         }
9670
9671         return insn - insn_buf;
9672 }
9673
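/*
 * Illustrative sketch (not part of this file): the rewrites above give an
 * SK_MSG program direct access to struct sk_msg_md. Assuming libbpf
 * conventions, a minimal verdict program might be:
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		void *data = (void *)(long)msg->data;
 *		void *data_end = (void *)(long)msg->data_end;
 *
 *		if (data + 4 > data_end)
 *			return SK_PASS;
 *		return msg->local_port == 80 ? SK_PASS : SK_DROP;
 *	}
 */
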
9674 const struct bpf_verifier_ops sk_filter_verifier_ops = {
9675         .get_func_proto         = sk_filter_func_proto,
9676         .is_valid_access        = sk_filter_is_valid_access,
9677         .convert_ctx_access     = bpf_convert_ctx_access,
9678         .gen_ld_abs             = bpf_gen_ld_abs,
9679 };
9680
9681 const struct bpf_prog_ops sk_filter_prog_ops = {
9682         .test_run               = bpf_prog_test_run_skb,
9683 };
9684
9685 const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
9686         .get_func_proto         = tc_cls_act_func_proto,
9687         .is_valid_access        = tc_cls_act_is_valid_access,
9688         .convert_ctx_access     = tc_cls_act_convert_ctx_access,
9689         .gen_prologue           = tc_cls_act_prologue,
9690         .gen_ld_abs             = bpf_gen_ld_abs,
9691 };
9692
9693 const struct bpf_prog_ops tc_cls_act_prog_ops = {
9694         .test_run               = bpf_prog_test_run_skb,
9695 };
9696
9697 const struct bpf_verifier_ops xdp_verifier_ops = {
9698         .get_func_proto         = xdp_func_proto,
9699         .is_valid_access        = xdp_is_valid_access,
9700         .convert_ctx_access     = xdp_convert_ctx_access,
9701         .gen_prologue           = bpf_noop_prologue,
9702 };
9703
9704 const struct bpf_prog_ops xdp_prog_ops = {
9705         .test_run               = bpf_prog_test_run_xdp,
9706 };
9707
9708 const struct bpf_verifier_ops cg_skb_verifier_ops = {
9709         .get_func_proto         = cg_skb_func_proto,
9710         .is_valid_access        = cg_skb_is_valid_access,
9711         .convert_ctx_access     = bpf_convert_ctx_access,
9712 };
9713
9714 const struct bpf_prog_ops cg_skb_prog_ops = {
9715         .test_run               = bpf_prog_test_run_skb,
9716 };
9717
9718 const struct bpf_verifier_ops lwt_in_verifier_ops = {
9719         .get_func_proto         = lwt_in_func_proto,
9720         .is_valid_access        = lwt_is_valid_access,
9721         .convert_ctx_access     = bpf_convert_ctx_access,
9722 };
9723
9724 const struct bpf_prog_ops lwt_in_prog_ops = {
9725         .test_run               = bpf_prog_test_run_skb,
9726 };
9727
9728 const struct bpf_verifier_ops lwt_out_verifier_ops = {
9729         .get_func_proto         = lwt_out_func_proto,
9730         .is_valid_access        = lwt_is_valid_access,
9731         .convert_ctx_access     = bpf_convert_ctx_access,
9732 };
9733
9734 const struct bpf_prog_ops lwt_out_prog_ops = {
9735         .test_run               = bpf_prog_test_run_skb,
9736 };
9737
9738 const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
9739         .get_func_proto         = lwt_xmit_func_proto,
9740         .is_valid_access        = lwt_is_valid_access,
9741         .convert_ctx_access     = bpf_convert_ctx_access,
9742         .gen_prologue           = tc_cls_act_prologue,
9743 };
9744
9745 const struct bpf_prog_ops lwt_xmit_prog_ops = {
9746         .test_run               = bpf_prog_test_run_skb,
9747 };
9748
9749 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
9750         .get_func_proto         = lwt_seg6local_func_proto,
9751         .is_valid_access        = lwt_is_valid_access,
9752         .convert_ctx_access     = bpf_convert_ctx_access,
9753 };
9754
9755 const struct bpf_prog_ops lwt_seg6local_prog_ops = {
9756         .test_run               = bpf_prog_test_run_skb,
9757 };
9758
9759 const struct bpf_verifier_ops cg_sock_verifier_ops = {
9760         .get_func_proto         = sock_filter_func_proto,
9761         .is_valid_access        = sock_filter_is_valid_access,
9762         .convert_ctx_access     = bpf_sock_convert_ctx_access,
9763 };
9764
9765 const struct bpf_prog_ops cg_sock_prog_ops = {
9766 };
9767
9768 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
9769         .get_func_proto         = sock_addr_func_proto,
9770         .is_valid_access        = sock_addr_is_valid_access,
9771         .convert_ctx_access     = sock_addr_convert_ctx_access,
9772 };
9773
9774 const struct bpf_prog_ops cg_sock_addr_prog_ops = {
9775 };
9776
9777 const struct bpf_verifier_ops sock_ops_verifier_ops = {
9778         .get_func_proto         = sock_ops_func_proto,
9779         .is_valid_access        = sock_ops_is_valid_access,
9780         .convert_ctx_access     = sock_ops_convert_ctx_access,
9781 };
9782
9783 const struct bpf_prog_ops sock_ops_prog_ops = {
9784 };
9785
9786 const struct bpf_verifier_ops sk_skb_verifier_ops = {
9787         .get_func_proto         = sk_skb_func_proto,
9788         .is_valid_access        = sk_skb_is_valid_access,
9789         .convert_ctx_access     = sk_skb_convert_ctx_access,
9790         .gen_prologue           = sk_skb_prologue,
9791 };
9792
9793 const struct bpf_prog_ops sk_skb_prog_ops = {
9794 };
9795
9796 const struct bpf_verifier_ops sk_msg_verifier_ops = {
9797         .get_func_proto         = sk_msg_func_proto,
9798         .is_valid_access        = sk_msg_is_valid_access,
9799         .convert_ctx_access     = sk_msg_convert_ctx_access,
9800         .gen_prologue           = bpf_noop_prologue,
9801 };
9802
9803 const struct bpf_prog_ops sk_msg_prog_ops = {
9804 };
9805
9806 const struct bpf_verifier_ops flow_dissector_verifier_ops = {
9807         .get_func_proto         = flow_dissector_func_proto,
9808         .is_valid_access        = flow_dissector_is_valid_access,
9809         .convert_ctx_access     = flow_dissector_convert_ctx_access,
9810 };
9811
9812 const struct bpf_prog_ops flow_dissector_prog_ops = {
9813         .test_run               = bpf_prog_test_run_flow_dissector,
9814 };
9815
9816 int sk_detach_filter(struct sock *sk)
9817 {
9818         int ret = -ENOENT;
9819         struct sk_filter *filter;
9820
9821         if (sock_flag(sk, SOCK_FILTER_LOCKED))
9822                 return -EPERM;
9823
9824         filter = rcu_dereference_protected(sk->sk_filter,
9825                                            lockdep_sock_is_held(sk));
9826         if (filter) {
9827                 RCU_INIT_POINTER(sk->sk_filter, NULL);
9828                 sk_filter_uncharge(sk, filter);
9829                 ret = 0;
9830         }
9831
9832         return ret;
9833 }
9834 EXPORT_SYMBOL_GPL(sk_detach_filter);
9835
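/*
 * Illustrative sketch (not part of this file): sk_detach_filter() is
 * normally reached from user space through setsockopt(); the option value
 * is ignored for the detach case:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 */
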
9836 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
9837                   unsigned int len)
9838 {
9839         struct sock_fprog_kern *fprog;
9840         struct sk_filter *filter;
9841         int ret = 0;
9842
9843         lock_sock(sk);
9844         filter = rcu_dereference_protected(sk->sk_filter,
9845                                            lockdep_sock_is_held(sk));
9846         if (!filter)
9847                 goto out;
9848
9849         /* We're copying the filter that was originally attached, so no
9850          * conversion/decode is needed anymore. eBPF programs that have no
9851          * original program cannot be dumped through this interface.
9852          */
9853         ret = -EACCES;
9854         fprog = filter->prog->orig_prog;
9855         if (!fprog)
9856                 goto out;
9857
9858         ret = fprog->len;
9859         if (!len)
9860                 /* User space is only asking for the number of filter blocks. */
9861                 goto out;
9862
9863         ret = -EINVAL;
9864         if (len < fprog->len)
9865                 goto out;
9866
9867         ret = -EFAULT;
9868         if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
9869                 goto out;
9870
9871         /* The API returns the number of filter blocks rather than
9872          * the number of bytes copied.
9873          */
9874         ret = fprog->len;
9875 out:
9876         release_sock(sk);
9877         return ret;
9878 }
9879
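/*
 * Illustrative sketch (not part of this file): the two-step pattern user
 * space can use with SO_GET_FILTER, matching the block-count semantics
 * above (optlen counts filter blocks, not bytes; error handling omitted):
 *
 *	socklen_t len = 0;
 *	struct sock_filter *insns;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);
 *	insns = calloc(len, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len);
 */
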
9880 #ifdef CONFIG_INET
9881 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
9882                                     struct sock_reuseport *reuse,
9883                                     struct sock *sk, struct sk_buff *skb,
9884                                     u32 hash)
9885 {
9886         reuse_kern->skb = skb;
9887         reuse_kern->sk = sk;
9888         reuse_kern->selected_sk = NULL;
9889         reuse_kern->data_end = skb->data + skb_headlen(skb);
9890         reuse_kern->hash = hash;
9891         reuse_kern->reuseport_id = reuse->reuseport_id;
9892         reuse_kern->bind_inany = reuse->bind_inany;
9893 }
9894
9895 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
9896                                   struct bpf_prog *prog, struct sk_buff *skb,
9897                                   u32 hash)
9898 {
9899         struct sk_reuseport_kern reuse_kern;
9900         enum sk_action action;
9901
9902         bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
9903         action = BPF_PROG_RUN(prog, &reuse_kern);
9904
9905         if (action == SK_PASS)
9906                 return reuse_kern.selected_sk;
9907         else
9908                 return ERR_PTR(-ECONNREFUSED);
9909 }
9910
9911 BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
9912            struct bpf_map *, map, void *, key, u32, flags)
9913 {
9914         bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
9915         struct sock_reuseport *reuse;
9916         struct sock *selected_sk;
9917
9918         selected_sk = map->ops->map_lookup_elem(map, key);
9919         if (!selected_sk)
9920                 return -ENOENT;
9921
9922         reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
9923         if (!reuse) {
9924                 /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
9925                 if (sk_is_refcounted(selected_sk))
9926                         sock_put(selected_sk);
9927
9928                 /* reuseport_array only holds sockets with a non-NULL
9929                  * sk_reuseport_cb. The only (!reuse) case here is that the
9930                  * sk has already been unhashed (e.g. by close()), so treat
9931                  * it as -ENOENT.
9932                  *
9933                  * Other maps (e.g. sock_map) do not provide this guarantee,
9934                  * and the sk may never have been in the reuseport group. */
9935                 return is_sockarray ? -ENOENT : -EINVAL;
9936         }
9937
9938         if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
9939                 struct sock *sk = reuse_kern->sk;
9940
9941                 if (sk->sk_protocol != selected_sk->sk_protocol)
9942                         return -EPROTOTYPE;
9943                 else if (sk->sk_family != selected_sk->sk_family)
9944                         return -EAFNOSUPPORT;
9945
9946                 /* Catch all. Likely bound to a different sockaddr. */
9947                 return -EBADFD;
9948         }
9949
9950         reuse_kern->selected_sk = selected_sk;
9951
9952         return 0;
9953 }
9954
9955 static const struct bpf_func_proto sk_select_reuseport_proto = {
9956         .func           = sk_select_reuseport,
9957         .gpl_only       = false,
9958         .ret_type       = RET_INTEGER,
9959         .arg1_type      = ARG_PTR_TO_CTX,
9960         .arg2_type      = ARG_CONST_MAP_PTR,
9961         .arg3_type      = ARG_PTR_TO_MAP_KEY,
9962         .arg4_type      = ARG_ANYTHING,
9963 };
9964
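/*
 * Illustrative sketch (not part of this file): a BPF_PROG_TYPE_SK_REUSEPORT
 * program using this helper with a hypothetical REUSEPORT_SOCKARRAY map
 * called "socks" (libbpf map syntax assumed):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} socks SEC(".maps");
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *reuse_md)
 *	{
 *		__u32 index = reuse_md->hash % 2;
 *
 *		if (bpf_sk_select_reuseport(reuse_md, &socks, &index, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */
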
9965 BPF_CALL_4(sk_reuseport_load_bytes,
9966            const struct sk_reuseport_kern *, reuse_kern, u32, offset,
9967            void *, to, u32, len)
9968 {
9969         return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
9970 }
9971
9972 static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
9973         .func           = sk_reuseport_load_bytes,
9974         .gpl_only       = false,
9975         .ret_type       = RET_INTEGER,
9976         .arg1_type      = ARG_PTR_TO_CTX,
9977         .arg2_type      = ARG_ANYTHING,
9978         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
9979         .arg4_type      = ARG_CONST_SIZE,
9980 };
9981
9982 BPF_CALL_5(sk_reuseport_load_bytes_relative,
9983            const struct sk_reuseport_kern *, reuse_kern, u32, offset,
9984            void *, to, u32, len, u32, start_header)
9985 {
9986         return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
9987                                                len, start_header);
9988 }
9989
9990 static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
9991         .func           = sk_reuseport_load_bytes_relative,
9992         .gpl_only       = false,
9993         .ret_type       = RET_INTEGER,
9994         .arg1_type      = ARG_PTR_TO_CTX,
9995         .arg2_type      = ARG_ANYTHING,
9996         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
9997         .arg4_type      = ARG_CONST_SIZE,
9998         .arg5_type      = ARG_ANYTHING,
9999 };
10000
10001 static const struct bpf_func_proto *
10002 sk_reuseport_func_proto(enum bpf_func_id func_id,
10003                         const struct bpf_prog *prog)
10004 {
10005         switch (func_id) {
10006         case BPF_FUNC_sk_select_reuseport:
10007                 return &sk_select_reuseport_proto;
10008         case BPF_FUNC_skb_load_bytes:
10009                 return &sk_reuseport_load_bytes_proto;
10010         case BPF_FUNC_skb_load_bytes_relative:
10011                 return &sk_reuseport_load_bytes_relative_proto;
10012         default:
10013                 return bpf_base_func_proto(func_id);
10014         }
10015 }
10016
10017 static bool
10018 sk_reuseport_is_valid_access(int off, int size,
10019                              enum bpf_access_type type,
10020                              const struct bpf_prog *prog,
10021                              struct bpf_insn_access_aux *info)
10022 {
10023         const u32 size_default = sizeof(__u32);
10024
10025         if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
10026             off % size || type != BPF_READ)
10027                 return false;
10028
10029         switch (off) {
10030         case offsetof(struct sk_reuseport_md, data):
10031                 info->reg_type = PTR_TO_PACKET;
10032                 return size == sizeof(__u64);
10033
10034         case offsetof(struct sk_reuseport_md, data_end):
10035                 info->reg_type = PTR_TO_PACKET_END;
10036                 return size == sizeof(__u64);
10037
10038         case offsetof(struct sk_reuseport_md, hash):
10039                 return size == size_default;
10040
10041         /* Fields that allow narrowing */
10042         case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
10043                 if (size < sizeof_field(struct sk_buff, protocol))
10044                         return false;
10045                 fallthrough;
10046         case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
10047         case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
10048         case bpf_ctx_range(struct sk_reuseport_md, len):
10049                 bpf_ctx_record_field_size(info, size_default);
10050                 return bpf_ctx_narrow_access_ok(off, size, size_default);
10051
10052         default:
10053                 return false;
10054         }
10055 }
10056
10057 #define SK_REUSEPORT_LOAD_FIELD(F) ({                                   \
10058         *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
10059                               si->dst_reg, si->src_reg,                 \
10060                               bpf_target_off(struct sk_reuseport_kern, F, \
10061                                              sizeof_field(struct sk_reuseport_kern, F), \
10062                                              target_size));             \
10063         })
10064
10065 #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)                          \
10066         SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,           \
10067                                     struct sk_buff,                     \
10068                                     skb,                                \
10069                                     SKB_FIELD)
10070
10071 #define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)                            \
10072         SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,           \
10073                                     struct sock,                        \
10074                                     sk,                                 \
10075                                     SK_FIELD)
10076
10077 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
10078                                            const struct bpf_insn *si,
10079                                            struct bpf_insn *insn_buf,
10080                                            struct bpf_prog *prog,
10081                                            u32 *target_size)
10082 {
10083         struct bpf_insn *insn = insn_buf;
10084
10085         switch (si->off) {
10086         case offsetof(struct sk_reuseport_md, data):
10087                 SK_REUSEPORT_LOAD_SKB_FIELD(data);
10088                 break;
10089
10090         case offsetof(struct sk_reuseport_md, len):
10091                 SK_REUSEPORT_LOAD_SKB_FIELD(len);
10092                 break;
10093
10094         case offsetof(struct sk_reuseport_md, eth_protocol):
10095                 SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
10096                 break;
10097
10098         case offsetof(struct sk_reuseport_md, ip_protocol):
10099                 SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
10100                 break;
10101
10102         case offsetof(struct sk_reuseport_md, data_end):
10103                 SK_REUSEPORT_LOAD_FIELD(data_end);
10104                 break;
10105
10106         case offsetof(struct sk_reuseport_md, hash):
10107                 SK_REUSEPORT_LOAD_FIELD(hash);
10108                 break;
10109
10110         case offsetof(struct sk_reuseport_md, bind_inany):
10111                 SK_REUSEPORT_LOAD_FIELD(bind_inany);
10112                 break;
10113         }
10114
10115         return insn - insn_buf;
10116 }
10117
10118 const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
10119         .get_func_proto         = sk_reuseport_func_proto,
10120         .is_valid_access        = sk_reuseport_is_valid_access,
10121         .convert_ctx_access     = sk_reuseport_convert_ctx_access,
10122 };
10123
10124 const struct bpf_prog_ops sk_reuseport_prog_ops = {
10125 };
10126
10127 DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
10128 EXPORT_SYMBOL(bpf_sk_lookup_enabled);
10129
10130 BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
10131            struct sock *, sk, u64, flags)
10132 {
10133         if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE |
10134                                BPF_SK_LOOKUP_F_NO_REUSEPORT)))
10135                 return -EINVAL;
10136         if (unlikely(sk && sk_is_refcounted(sk)))
10137                 return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */
10138         if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED))
10139                 return -ESOCKTNOSUPPORT; /* reject connected sockets */
10140
10141         /* Check if the socket is suitable for the packet's L3/L4 protocol */
10142         if (sk && sk->sk_protocol != ctx->protocol)
10143                 return -EPROTOTYPE;
10144         if (sk && sk->sk_family != ctx->family &&
10145             (sk->sk_family == AF_INET || ipv6_only_sock(sk)))
10146                 return -EAFNOSUPPORT;
10147
10148         if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE))
10149                 return -EEXIST;
10150
10151         /* Select socket as lookup result */
10152         ctx->selected_sk = sk;
10153         ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT;
10154         return 0;
10155 }
10156
10157 static const struct bpf_func_proto bpf_sk_lookup_assign_proto = {
10158         .func           = bpf_sk_lookup_assign,
10159         .gpl_only       = false,
10160         .ret_type       = RET_INTEGER,
10161         .arg1_type      = ARG_PTR_TO_CTX,
10162         .arg2_type      = ARG_PTR_TO_SOCKET_OR_NULL,
10163         .arg3_type      = ARG_ANYTHING,
10164 };
10165
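/*
 * Illustrative sketch (not part of this file): an SK_LOOKUP program
 * steering new connections to a socket held in a hypothetical SOCKMAP
 * called "dest_socks" (libbpf conventions assumed):
 *
 *	SEC("sk_lookup")
 *	int redirect(struct bpf_sk_lookup *ctx)
 *	{
 *		struct bpf_sock *sk;
 *		__u32 key = 0;
 *		long err;
 *
 *		sk = bpf_map_lookup_elem(&dest_socks, &key);
 *		if (!sk)
 *			return SK_PASS;
 *		err = bpf_sk_assign(ctx, sk, 0);
 *		bpf_sk_release(sk);
 *		return err ? SK_DROP : SK_PASS;
 *	}
 */
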
10166 static const struct bpf_func_proto *
10167 sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
10168 {
10169         switch (func_id) {
10170         case BPF_FUNC_perf_event_output:
10171                 return &bpf_event_output_data_proto;
10172         case BPF_FUNC_sk_assign:
10173                 return &bpf_sk_lookup_assign_proto;
10174         case BPF_FUNC_sk_release:
10175                 return &bpf_sk_release_proto;
10176         default:
10177                 return bpf_sk_base_func_proto(func_id);
10178         }
10179 }
10180
10181 static bool sk_lookup_is_valid_access(int off, int size,
10182                                       enum bpf_access_type type,
10183                                       const struct bpf_prog *prog,
10184                                       struct bpf_insn_access_aux *info)
10185 {
10186         if (off < 0 || off >= sizeof(struct bpf_sk_lookup))
10187                 return false;
10188         if (off % size != 0)
10189                 return false;
10190         if (type != BPF_READ)
10191                 return false;
10192
10193         switch (off) {
10194         case offsetof(struct bpf_sk_lookup, sk):
10195                 info->reg_type = PTR_TO_SOCKET_OR_NULL;
10196                 return size == sizeof(__u64);
10197
10198         case bpf_ctx_range(struct bpf_sk_lookup, family):
10199         case bpf_ctx_range(struct bpf_sk_lookup, protocol):
10200         case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4):
10201         case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
10202         case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
10203         case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
10204         case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
10205         case bpf_ctx_range(struct bpf_sk_lookup, local_port):
10206                 bpf_ctx_record_field_size(info, sizeof(__u32));
10207                 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
10208
10209         default:
10210                 return false;
10211         }
10212 }
10213
10214 static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
10215                                         const struct bpf_insn *si,
10216                                         struct bpf_insn *insn_buf,
10217                                         struct bpf_prog *prog,
10218                                         u32 *target_size)
10219 {
10220         struct bpf_insn *insn = insn_buf;
10221
10222         switch (si->off) {
10223         case offsetof(struct bpf_sk_lookup, sk):
10224                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10225                                       offsetof(struct bpf_sk_lookup_kern, selected_sk));
10226                 break;
10227
10228         case offsetof(struct bpf_sk_lookup, family):
10229                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10230                                       bpf_target_off(struct bpf_sk_lookup_kern,
10231                                                      family, 2, target_size));
10232                 break;
10233
10234         case offsetof(struct bpf_sk_lookup, protocol):
10235                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10236                                       bpf_target_off(struct bpf_sk_lookup_kern,
10237                                                      protocol, 2, target_size));
10238                 break;
10239
10240         case offsetof(struct bpf_sk_lookup, remote_ip4):
10241                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
10242                                       bpf_target_off(struct bpf_sk_lookup_kern,
10243                                                      v4.saddr, 4, target_size));
10244                 break;
10245
10246         case offsetof(struct bpf_sk_lookup, local_ip4):
10247                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
10248                                       bpf_target_off(struct bpf_sk_lookup_kern,
10249                                                      v4.daddr, 4, target_size));
10250                 break;
10251
10252         case bpf_ctx_range_till(struct bpf_sk_lookup,
10253                                 remote_ip6[0], remote_ip6[3]): {
10254 #if IS_ENABLED(CONFIG_IPV6)
10255                 int off = si->off;
10256
10257                 off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]);
10258                 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
10259                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10260                                       offsetof(struct bpf_sk_lookup_kern, v6.saddr));
10261                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
10262                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
10263 #else
10264                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
10265 #endif
10266                 break;
10267         }
10268         case bpf_ctx_range_till(struct bpf_sk_lookup,
10269                                 local_ip6[0], local_ip6[3]): {
10270 #if IS_ENABLED(CONFIG_IPV6)
10271                 int off = si->off;
10272
10273                 off -= offsetof(struct bpf_sk_lookup, local_ip6[0]);
10274                 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
10275                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10276                                       offsetof(struct bpf_sk_lookup_kern, v6.daddr));
10277                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
10278                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
10279 #else
10280                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
10281 #endif
10282                 break;
10283         }
10284         case offsetof(struct bpf_sk_lookup, remote_port):
10285                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10286                                       bpf_target_off(struct bpf_sk_lookup_kern,
10287                                                      sport, 2, target_size));
10288                 break;
10289
10290         case offsetof(struct bpf_sk_lookup, local_port):
10291                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10292                                       bpf_target_off(struct bpf_sk_lookup_kern,
10293                                                      dport, 2, target_size));
10294                 break;
10295         }
10296
10297         return insn - insn_buf;
10298 }
10299
10300 const struct bpf_prog_ops sk_lookup_prog_ops = {
10301 };
10302
10303 const struct bpf_verifier_ops sk_lookup_verifier_ops = {
10304         .get_func_proto         = sk_lookup_func_proto,
10305         .is_valid_access        = sk_lookup_is_valid_access,
10306         .convert_ctx_access     = sk_lookup_convert_ctx_access,
10307 };
10308
10309 #endif /* CONFIG_INET */
10310
10311 DEFINE_BPF_DISPATCHER(xdp)
10312
10313 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
10314 {
10315         bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
10316 }
10317
10318 #ifdef CONFIG_DEBUG_INFO_BTF
10319 BTF_ID_LIST_GLOBAL(btf_sock_ids)
10320 #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
10321 BTF_SOCK_TYPE_xxx
10322 #undef BTF_SOCK_TYPE
10323 #else
10324 u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
10325 #endif
10326
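/*
 * BTF_SOCK_TYPE_xxx above is an X-macro list (see <linux/btf_ids.h>); each
 * BTF_SOCK_TYPE(name, type) entry expands here to BTF_ID(struct, type),
 * e.g. roughly:
 *
 *	BTF_ID(struct, tcp6_sock)
 *
 * so that btf_sock_ids[] can be indexed by the BTF_SOCK_TYPE_* enum values
 * referenced in the .ret_btf_id members below.
 */
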
10327 BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
10328 {
10329         /* The tcp6_sock type is not generated in DWARF and hence not in
10330          * BTF; trigger explicit type generation here.
10331          */
10332         BTF_TYPE_EMIT(struct tcp6_sock);
10333         if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
10334             sk->sk_family == AF_INET6)
10335                 return (unsigned long)sk;
10336
10337         return (unsigned long)NULL;
10338 }
10339
10340 const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
10341         .func                   = bpf_skc_to_tcp6_sock,
10342         .gpl_only               = false,
10343         .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
10344         .arg1_type              = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10345         .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
10346 };
10347
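/*
 * Illustrative sketch (not part of this file): tracing and iterator
 * programs use these casts to reach protocol-specific state via BTF.
 * Assuming the tcp BPF iterator (bpf_iter__tcp is its kernel-defined
 * context), something like:
 *
 *	SEC("iter/tcp")
 *	int dump_tcp6(struct bpf_iter__tcp *ctx)
 *	{
 *		struct sock_common *skc = ctx->sk_common;
 *		struct tcp6_sock *tp6;
 *
 *		if (!skc)
 *			return 0;
 *		tp6 = bpf_skc_to_tcp6_sock(skc);
 *		if (tp6)
 *			bpf_printk("rcv_nxt=%u", tp6->tcp.rcv_nxt);
 *		return 0;
 *	}
 */
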
10348 BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
10349 {
10350         if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
10351                 return (unsigned long)sk;
10352
10353         return (unsigned long)NULL;
10354 }
10355
10356 const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
10357         .func                   = bpf_skc_to_tcp_sock,
10358         .gpl_only               = false,
10359         .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
10360         .arg1_type              = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10361         .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
10362 };
10363
10364 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
10365 {
10366         /* BTF types for tcp_timewait_sock and inet_timewait_sock are not
10367          * generated if CONFIG_INET=n. Trigger explicit type generation here.
10368          */
10369         BTF_TYPE_EMIT(struct inet_timewait_sock);
10370         BTF_TYPE_EMIT(struct tcp_timewait_sock);
10371
10372 #ifdef CONFIG_INET
10373         if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
10374                 return (unsigned long)sk;
10375 #endif
10376
10377 #if IS_BUILTIN(CONFIG_IPV6)
10378         if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
10379                 return (unsigned long)sk;
10380 #endif
10381
10382         return (unsigned long)NULL;
10383 }
10384
10385 const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
10386         .func                   = bpf_skc_to_tcp_timewait_sock,
10387         .gpl_only               = false,
10388         .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
10389         .arg1_type              = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10390         .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
10391 };
10392
10393 BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
10394 {
10395 #ifdef CONFIG_INET
10396         if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
10397                 return (unsigned long)sk;
10398 #endif
10399
10400 #if IS_BUILTIN(CONFIG_IPV6)
10401         if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
10402                 return (unsigned long)sk;
10403 #endif
10404
10405         return (unsigned long)NULL;
10406 }
10407
10408 const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
10409         .func                   = bpf_skc_to_tcp_request_sock,
10410         .gpl_only               = false,
10411         .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
10412         .arg1_type              = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10413         .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
10414 };
10415
10416 BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
10417 {
10418         /* The udp6_sock type is not generated in DWARF and hence not in
10419          * BTF; trigger explicit type generation here.
10420          */
10421         BTF_TYPE_EMIT(struct udp6_sock);
10422         if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
10423             sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
10424                 return (unsigned long)sk;
10425
10426         return (unsigned long)NULL;
10427 }
10428
10429 const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
10430         .func                   = bpf_skc_to_udp6_sock,
10431         .gpl_only               = false,
10432         .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
10433         .arg1_type              = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10434         .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
10435 };
10436
10437 BPF_CALL_1(bpf_sock_from_file, struct file *, file)
10438 {
10439         return (unsigned long)sock_from_file(file);
10440 }
10441
10442 BTF_ID_LIST(bpf_sock_from_file_btf_ids)
10443 BTF_ID(struct, socket)
10444 BTF_ID(struct, file)
10445
10446 const struct bpf_func_proto bpf_sock_from_file_proto = {
10447         .func           = bpf_sock_from_file,
10448         .gpl_only       = false,
10449         .ret_type       = RET_PTR_TO_BTF_ID_OR_NULL,
10450         .ret_btf_id     = &bpf_sock_from_file_btf_ids[0],
10451         .arg1_type      = ARG_PTR_TO_BTF_ID,
10452         .arg1_btf_id    = &bpf_sock_from_file_btf_ids[1],
10453 };
10454
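/*
 * Illustrative sketch (not part of this file): bpf_sock_from_file() lets a
 * tracing/iterator program recover the socket behind a struct file, e.g.
 * from the task_file iterator (bpf_iter__task_file assumed as context):
 *
 *	SEC("iter/task_file")
 *	int dump(struct bpf_iter__task_file *ctx)
 *	{
 *		struct file *file = ctx->file;
 *		struct socket *sock;
 *
 *		if (!file)
 *			return 0;
 *		sock = bpf_sock_from_file(file);
 *		if (sock && sock->sk)
 *			bpf_printk("sk_state=%d", sock->sk->sk_state);
 *		return 0;
 *	}
 */
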
10455 static const struct bpf_func_proto *
10456 bpf_sk_base_func_proto(enum bpf_func_id func_id)
10457 {
10458         const struct bpf_func_proto *func;
10459
10460         switch (func_id) {
10461         case BPF_FUNC_skc_to_tcp6_sock:
10462                 func = &bpf_skc_to_tcp6_sock_proto;
10463                 break;
10464         case BPF_FUNC_skc_to_tcp_sock:
10465                 func = &bpf_skc_to_tcp_sock_proto;
10466                 break;
10467         case BPF_FUNC_skc_to_tcp_timewait_sock:
10468                 func = &bpf_skc_to_tcp_timewait_sock_proto;
10469                 break;
10470         case BPF_FUNC_skc_to_tcp_request_sock:
10471                 func = &bpf_skc_to_tcp_request_sock_proto;
10472                 break;
10473         case BPF_FUNC_skc_to_udp6_sock:
10474                 func = &bpf_skc_to_udp6_sock_proto;
10475                 break;
10476         default:
10477                 return bpf_base_func_proto(func_id);
10478         }
10479
10480         if (!perfmon_capable())
10481                 return NULL;
10482
10483         return func;
10484 }