1 #include <linux/kernel.h>
2 #include <linux/skbuff.h>
3 #include <linux/export.h>
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/if_vlan.h>
7 #include <net/dsa.h>
8 #include <net/dst_metadata.h>
9 #include <net/ip.h>
10 #include <net/ipv6.h>
11 #include <net/gre.h>
12 #include <net/pptp.h>
13 #include <net/tipc.h>
14 #include <linux/igmp.h>
15 #include <linux/icmp.h>
16 #include <linux/sctp.h>
17 #include <linux/dccp.h>
18 #include <linux/if_tunnel.h>
19 #include <linux/if_pppox.h>
20 #include <linux/ppp_defs.h>
21 #include <linux/stddef.h>
22 #include <linux/if_ether.h>
23 #include <linux/mpls.h>
24 #include <linux/tcp.h>
25 #include <net/flow_dissector.h>
26 #include <scsi/fc/fc_fcoe.h>
27 #include <uapi/linux/batadv_packet.h>
28
29 static void dissector_set_key(struct flow_dissector *flow_dissector,
30                               enum flow_dissector_key_id key_id)
31 {
32         flow_dissector->used_keys |= (1 << key_id);
33 }
34
35 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
36                              const struct flow_dissector_key *key,
37                              unsigned int key_count)
38 {
39         unsigned int i;
40
41         memset(flow_dissector, 0, sizeof(*flow_dissector));
42
43         for (i = 0; i < key_count; i++, key++) {
44                 /* User should make sure that every key target offset is within
45                  * the boundaries of an unsigned short.
46                  */
47                 BUG_ON(key->offset > USHRT_MAX);
48                 BUG_ON(dissector_uses_key(flow_dissector,
49                                           key->key_id));
50
51                 dissector_set_key(flow_dissector, key->key_id);
52                 flow_dissector->offset[key->key_id] = key->offset;
53         }
54
55         /* Ensure that the dissector always includes the control and basic keys.
56          * That way we are able to avoid handling their absence in the fast path.
57          */
58         BUG_ON(!dissector_uses_key(flow_dissector,
59                                    FLOW_DISSECTOR_KEY_CONTROL));
60         BUG_ON(!dissector_uses_key(flow_dissector,
61                                    FLOW_DISSECTOR_KEY_BASIC));
62 }
63 EXPORT_SYMBOL(skb_flow_dissector_init);
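
/* A user of the dissector describes, per key, where the dissected value
 * should land inside its own container struct; the key array must include
 * at least the mandatory CONTROL and BASIC keys (see the BUG_ON()s above).
 * The default dissectors at the bottom of this file follow this pattern;
 * a minimal caller would look roughly like the sketch below, where
 * "my_keys", "my_dissector_keys" and "my_dissector" are made-up names used
 * only for illustration:
 *
 *	struct my_keys {
 *		struct flow_dissector_key_control control;
 *		struct flow_dissector_key_basic basic;
 *	};
 *	static const struct flow_dissector_key my_dissector_keys[] = {
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *			.offset = offsetof(struct my_keys, control),
 *		},
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_BASIC,
 *			.offset = offsetof(struct my_keys, basic),
 *		},
 *	};
 *	static struct flow_dissector my_dissector __read_mostly;
 *
 *	skb_flow_dissector_init(&my_dissector, my_dissector_keys,
 *				ARRAY_SIZE(my_dissector_keys));
 */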
64
65 /**
66  * skb_flow_get_be16 - extract be16 entity
67  * @skb: sk_buff to extract from
68  * @poff: offset to extract at
69  * @data: raw buffer pointer to the packet
70  * @hlen: packet header length
71  *
72  * The function will try to retrieve a be16 entity at
73  * offset poff
74  */
75 static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
76                                 void *data, int hlen)
77 {
78         __be16 *u, _u;
79
80         u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
81         if (u)
82                 return *u;
83
84         return 0;
85 }
86
87 /**
88  * __skb_flow_get_ports - extract the upper layer ports and return them
89  * @skb: sk_buff to extract the ports from
90  * @thoff: transport header offset
91  * @ip_proto: protocol for which to get port offset
92  * @data: raw buffer pointer to the packet, if NULL use skb->data
93  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
94  *
95  * The function will try to retrieve the ports at offset thoff + poff where poff
96  * is the protocol port offset returned from proto_ports_offset
97  */
98 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
99                             void *data, int hlen)
100 {
101         int poff = proto_ports_offset(ip_proto);
102
103         if (!data) {
104                 data = skb->data;
105                 hlen = skb_headlen(skb);
106         }
107
108         if (poff >= 0) {
109                 __be32 *ports, _ports;
110
111                 ports = __skb_header_pointer(skb, thoff + poff,
112                                              sizeof(_ports), data, hlen, &_ports);
113                 if (ports)
114                         return *ports;
115         }
116
117         return 0;
118 }
119 EXPORT_SYMBOL(__skb_flow_get_ports);
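
/* The two 16-bit ports come back packed in a single __be32, in the order
 * in which they appear on the wire (source port first).  struct
 * flow_dissector_key_ports overlays the same layout, so a caller can do,
 * roughly:
 *
 *	struct flow_dissector_key_ports tp;
 *
 *	tp.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	if (tp.ports)
 *		pr_debug("sport %u dport %u\n", ntohs(tp.src), ntohs(tp.dst));
 *
 * (illustrative sketch only; real callers normally pass the data/hlen pair
 * they already hold instead of NULL/0).
 */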
120
121 static void
122 skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
123                                    struct flow_dissector *flow_dissector,
124                                    void *target_container)
125 {
126         struct flow_dissector_key_control *ctrl;
127
128         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
129                 return;
130
131         ctrl = skb_flow_dissector_target(flow_dissector,
132                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
133                                          target_container);
134         ctrl->addr_type = type;
135 }
136
137 void
138 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
139                              struct flow_dissector *flow_dissector,
140                              void *target_container)
141 {
142         struct ip_tunnel_info *info;
143         struct ip_tunnel_key *key;
144
145         /* A quick check to see if there might be something to do. */
146         if (!dissector_uses_key(flow_dissector,
147                                 FLOW_DISSECTOR_KEY_ENC_KEYID) &&
148             !dissector_uses_key(flow_dissector,
149                                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
150             !dissector_uses_key(flow_dissector,
151                                 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
152             !dissector_uses_key(flow_dissector,
153                                 FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
154             !dissector_uses_key(flow_dissector,
155                                 FLOW_DISSECTOR_KEY_ENC_PORTS) &&
156             !dissector_uses_key(flow_dissector,
157                                 FLOW_DISSECTOR_KEY_ENC_IP))
158                 return;
159
160         info = skb_tunnel_info(skb);
161         if (!info)
162                 return;
163
164         key = &info->key;
165
166         switch (ip_tunnel_info_af(info)) {
167         case AF_INET:
168                 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
169                                                    flow_dissector,
170                                                    target_container);
171                 if (dissector_uses_key(flow_dissector,
172                                        FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
173                         struct flow_dissector_key_ipv4_addrs *ipv4;
174
175                         ipv4 = skb_flow_dissector_target(flow_dissector,
176                                                          FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
177                                                          target_container);
178                         ipv4->src = key->u.ipv4.src;
179                         ipv4->dst = key->u.ipv4.dst;
180                 }
181                 break;
182         case AF_INET6:
183                 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
184                                                    flow_dissector,
185                                                    target_container);
186                 if (dissector_uses_key(flow_dissector,
187                                        FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
188                         struct flow_dissector_key_ipv6_addrs *ipv6;
189
190                         ipv6 = skb_flow_dissector_target(flow_dissector,
191                                                          FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
192                                                          target_container);
193                         ipv6->src = key->u.ipv6.src;
194                         ipv6->dst = key->u.ipv6.dst;
195                 }
196                 break;
197         }
198
199         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
200                 struct flow_dissector_key_keyid *keyid;
201
202                 keyid = skb_flow_dissector_target(flow_dissector,
203                                                   FLOW_DISSECTOR_KEY_ENC_KEYID,
204                                                   target_container);
205                 keyid->keyid = tunnel_id_to_key32(key->tun_id);
206         }
207
208         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
209                 struct flow_dissector_key_ports *tp;
210
211                 tp = skb_flow_dissector_target(flow_dissector,
212                                                FLOW_DISSECTOR_KEY_ENC_PORTS,
213                                                target_container);
214                 tp->src = key->tp_src;
215                 tp->dst = key->tp_dst;
216         }
217
218         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
219                 struct flow_dissector_key_ip *ip;
220
221                 ip = skb_flow_dissector_target(flow_dissector,
222                                                FLOW_DISSECTOR_KEY_ENC_IP,
223                                                target_container);
224                 ip->tos = key->tos;
225                 ip->ttl = key->ttl;
226         }
227 }
228 EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
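
/* Consumers that want tunnel metadata (for example classifiers that match
 * on the outer header of a packet that has already been decapsulated)
 * register the ENC_* keys in their dissector via skb_flow_dissector_init()
 * and call this helper before the normal header walk.  A rough sketch,
 * where "tun_keys" and "tun_dissector" are made-up names and the mandatory
 * CONTROL and BASIC keys are omitted for brevity:
 *
 *	struct tun_keys {
 *		struct flow_dissector_key_control enc_control;
 *		struct flow_dissector_key_ipv4_addrs enc_ipv4;
 *		struct flow_dissector_key_keyid enc_keyid;
 *		struct flow_dissector_key_ports enc_tp;
 *		struct flow_dissector_key_ip enc_ip;
 *	} keys = {};
 *
 *	skb_flow_dissect_tunnel_info(skb, &tun_dissector, &keys);
 *
 * Only keys actually registered in the dissector are filled in; the tunnel
 * tos and ttl land in the FLOW_DISSECTOR_KEY_ENC_IP key.
 */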
229
230 static enum flow_dissect_ret
231 __skb_flow_dissect_mpls(const struct sk_buff *skb,
232                         struct flow_dissector *flow_dissector,
233                         void *target_container, void *data, int nhoff, int hlen)
234 {
235         struct flow_dissector_key_keyid *key_keyid;
236         struct mpls_label *hdr, _hdr[2];
237         u32 entry, label;
238
239         if (!dissector_uses_key(flow_dissector,
240                                 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
241             !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
242                 return FLOW_DISSECT_RET_OUT_GOOD;
243
244         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
245                                    hlen, &_hdr);
246         if (!hdr)
247                 return FLOW_DISSECT_RET_OUT_BAD;
248
249         entry = ntohl(hdr[0].entry);
250         label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
251
252         if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
253                 struct flow_dissector_key_mpls *key_mpls;
254
255                 key_mpls = skb_flow_dissector_target(flow_dissector,
256                                                      FLOW_DISSECTOR_KEY_MPLS,
257                                                      target_container);
258                 key_mpls->mpls_label = label;
259                 key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
260                                         >> MPLS_LS_TTL_SHIFT;
261                 key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
262                                         >> MPLS_LS_TC_SHIFT;
263                 key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
264                                         >> MPLS_LS_S_SHIFT;
265         }
266
267         if (label == MPLS_LABEL_ENTROPY) {
268                 key_keyid = skb_flow_dissector_target(flow_dissector,
269                                                       FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
270                                                       target_container);
271                 key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
272         }
273         return FLOW_DISSECT_RET_OUT_GOOD;
274 }
275
276 static enum flow_dissect_ret
277 __skb_flow_dissect_arp(const struct sk_buff *skb,
278                        struct flow_dissector *flow_dissector,
279                        void *target_container, void *data, int nhoff, int hlen)
280 {
281         struct flow_dissector_key_arp *key_arp;
282         struct {
283                 unsigned char ar_sha[ETH_ALEN];
284                 unsigned char ar_sip[4];
285                 unsigned char ar_tha[ETH_ALEN];
286                 unsigned char ar_tip[4];
287         } *arp_eth, _arp_eth;
288         const struct arphdr *arp;
289         struct arphdr _arp;
290
291         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
292                 return FLOW_DISSECT_RET_OUT_GOOD;
293
294         arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
295                                    hlen, &_arp);
296         if (!arp)
297                 return FLOW_DISSECT_RET_OUT_BAD;
298
299         if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
300             arp->ar_pro != htons(ETH_P_IP) ||
301             arp->ar_hln != ETH_ALEN ||
302             arp->ar_pln != 4 ||
303             (arp->ar_op != htons(ARPOP_REPLY) &&
304              arp->ar_op != htons(ARPOP_REQUEST)))
305                 return FLOW_DISSECT_RET_OUT_BAD;
306
307         arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
308                                        sizeof(_arp_eth), data,
309                                        hlen, &_arp_eth);
310         if (!arp_eth)
311                 return FLOW_DISSECT_RET_OUT_BAD;
312
313         key_arp = skb_flow_dissector_target(flow_dissector,
314                                             FLOW_DISSECTOR_KEY_ARP,
315                                             target_container);
316
317         memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
318         memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
319
320         /* Only store the lower byte of the opcode;
321          * this covers ARPOP_REPLY and ARPOP_REQUEST.
322          */
323         key_arp->op = ntohs(arp->ar_op) & 0xff;
324
325         ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
326         ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
327
328         return FLOW_DISSECT_RET_OUT_GOOD;
329 }
330
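/* GRE (RFC 2784/2890) and the PPTP variant of GRE (RFC 2637) carry their
 * optional fields in a fixed order after the base header: checksum plus
 * reserved1 (if GRE_CSUM), key (if GRE_KEY), then sequence number (if
 * GRE_SEQ).  The helper below walks these flags to locate the encapsulated
 * protocol and, for keyed GRE and PPTP, the key id.
 */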
331 static enum flow_dissect_ret
332 __skb_flow_dissect_gre(const struct sk_buff *skb,
333                        struct flow_dissector_key_control *key_control,
334                        struct flow_dissector *flow_dissector,
335                        void *target_container, void *data,
336                        __be16 *p_proto, int *p_nhoff, int *p_hlen,
337                        unsigned int flags)
338 {
339         struct flow_dissector_key_keyid *key_keyid;
340         struct gre_base_hdr *hdr, _hdr;
341         int offset = 0;
342         u16 gre_ver;
343
344         hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
345                                    data, *p_hlen, &_hdr);
346         if (!hdr)
347                 return FLOW_DISSECT_RET_OUT_BAD;
348
349         /* Only look inside GRE without routing */
350         if (hdr->flags & GRE_ROUTING)
351                 return FLOW_DISSECT_RET_OUT_GOOD;
352
353         /* Only look inside GRE for versions 0 and 1 */
354         gre_ver = ntohs(hdr->flags & GRE_VERSION);
355         if (gre_ver > 1)
356                 return FLOW_DISSECT_RET_OUT_GOOD;
357
358         *p_proto = hdr->protocol;
359         if (gre_ver) {
360                 /* Version 1 must be PPTP, and the key flag must be set */
361                 if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
362                         return FLOW_DISSECT_RET_OUT_GOOD;
363         }
364
365         offset += sizeof(struct gre_base_hdr);
366
367         if (hdr->flags & GRE_CSUM)
368                 offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
369                           sizeof(((struct gre_full_hdr *) 0)->reserved1);
370
371         if (hdr->flags & GRE_KEY) {
372                 const __be32 *keyid;
373                 __be32 _keyid;
374
375                 keyid = __skb_header_pointer(skb, *p_nhoff + offset,
376                                              sizeof(_keyid),
377                                              data, *p_hlen, &_keyid);
378                 if (!keyid)
379                         return FLOW_DISSECT_RET_OUT_BAD;
380
381                 if (dissector_uses_key(flow_dissector,
382                                        FLOW_DISSECTOR_KEY_GRE_KEYID)) {
383                         key_keyid = skb_flow_dissector_target(flow_dissector,
384                                                               FLOW_DISSECTOR_KEY_GRE_KEYID,
385                                                               target_container);
386                         if (gre_ver == 0)
387                                 key_keyid->keyid = *keyid;
388                         else
389                                 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
390                 }
391                 offset += sizeof(((struct gre_full_hdr *) 0)->key);
392         }
393
394         if (hdr->flags & GRE_SEQ)
395                 offset += sizeof(((struct pptp_gre_header *) 0)->seq);
396
397         if (gre_ver == 0) {
398                 if (*p_proto == htons(ETH_P_TEB)) {
399                         const struct ethhdr *eth;
400                         struct ethhdr _eth;
401
402                         eth = __skb_header_pointer(skb, *p_nhoff + offset,
403                                                    sizeof(_eth),
404                                                    data, *p_hlen, &_eth);
405                         if (!eth)
406                                 return FLOW_DISSECT_RET_OUT_BAD;
407                         *p_proto = eth->h_proto;
408                         offset += sizeof(*eth);
409
410                         /* Cap headers that we access via pointers at the
411                          * end of the Ethernet header as our maximum alignment
412                          * at that point is only 2 bytes.
413                          */
414                         if (NET_IP_ALIGN)
415                                 *p_hlen = *p_nhoff + offset;
416                 }
417         } else { /* version 1, must be PPTP */
418                 u8 _ppp_hdr[PPP_HDRLEN];
419                 u8 *ppp_hdr;
420
421                 if (hdr->flags & GRE_ACK)
422                         offset += sizeof(((struct pptp_gre_header *) 0)->ack);
423
424                 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
425                                                sizeof(_ppp_hdr),
426                                                data, *p_hlen, _ppp_hdr);
427                 if (!ppp_hdr)
428                         return FLOW_DISSECT_RET_OUT_BAD;
429
430                 switch (PPP_PROTOCOL(ppp_hdr)) {
431                 case PPP_IP:
432                         *p_proto = htons(ETH_P_IP);
433                         break;
434                 case PPP_IPV6:
435                         *p_proto = htons(ETH_P_IPV6);
436                         break;
437                 default:
438                         /* Could probably catch some more like MPLS */
439                         break;
440                 }
441
442                 offset += PPP_HDRLEN;
443         }
444
445         *p_nhoff += offset;
446         key_control->flags |= FLOW_DIS_ENCAPSULATION;
447         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
448                 return FLOW_DISSECT_RET_OUT_GOOD;
449
450         return FLOW_DISSECT_RET_PROTO_AGAIN;
451 }
452
453 /**
454  * __skb_flow_dissect_batadv() - dissect batman-adv header
455  * @skb: sk_buff with the batman-adv header
456  * @key_control: flow dissector's control key
457  * @data: raw buffer pointer to the packet, if NULL use skb->data
458  * @p_proto: pointer used to update the protocol to process next
459  * @p_nhoff: pointer used to update inner network header offset
460  * @hlen: packet header length
461  * @flags: any combination of FLOW_DISSECTOR_F_*
462  *
463  * An attempt is made to dissect ETH_P_BATMAN packets. Only
464  * &struct batadv_unicast packets are actually processed, because they contain an
465  * inner Ethernet header and are usually followed by the actual network header.
466  * This allows the flow dissector to continue processing the packet.
467  *
468  * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
469  *  FLOW_DISSECT_RET_OUT_GOOD when the dissector should stop after encapsulation,
470  *  otherwise FLOW_DISSECT_RET_OUT_BAD
471  */
472 static enum flow_dissect_ret
473 __skb_flow_dissect_batadv(const struct sk_buff *skb,
474                           struct flow_dissector_key_control *key_control,
475                           void *data, __be16 *p_proto, int *p_nhoff, int hlen,
476                           unsigned int flags)
477 {
478         struct {
479                 struct batadv_unicast_packet batadv_unicast;
480                 struct ethhdr eth;
481         } *hdr, _hdr;
482
483         hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
484                                    &_hdr);
485         if (!hdr)
486                 return FLOW_DISSECT_RET_OUT_BAD;
487
488         if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
489                 return FLOW_DISSECT_RET_OUT_BAD;
490
491         if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
492                 return FLOW_DISSECT_RET_OUT_BAD;
493
494         *p_proto = hdr->eth.h_proto;
495         *p_nhoff += sizeof(*hdr);
496
497         key_control->flags |= FLOW_DIS_ENCAPSULATION;
498         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
499                 return FLOW_DISSECT_RET_OUT_GOOD;
500
501         return FLOW_DISSECT_RET_PROTO_AGAIN;
502 }
503
504 static void
505 __skb_flow_dissect_tcp(const struct sk_buff *skb,
506                        struct flow_dissector *flow_dissector,
507                        void *target_container, void *data, int thoff, int hlen)
508 {
509         struct flow_dissector_key_tcp *key_tcp;
510         struct tcphdr *th, _th;
511
512         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
513                 return;
514
515         th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
516         if (!th)
517                 return;
518
519         if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
520                 return;
521
522         key_tcp = skb_flow_dissector_target(flow_dissector,
523                                             FLOW_DISSECTOR_KEY_TCP,
524                                             target_container);
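        /* The first 16 bits of tcp_flag_word() hold the data offset (4 bits),
         * the 3 reserved bits and the 9 flag bits (NS through FIN); masking
         * with 0x0FFF drops the data offset and keeps the rest.
         */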
525         key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
526 }
527
528 static void
529 __skb_flow_dissect_ipv4(const struct sk_buff *skb,
530                         struct flow_dissector *flow_dissector,
531                         void *target_container, void *data, const struct iphdr *iph)
532 {
533         struct flow_dissector_key_ip *key_ip;
534
535         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
536                 return;
537
538         key_ip = skb_flow_dissector_target(flow_dissector,
539                                            FLOW_DISSECTOR_KEY_IP,
540                                            target_container);
541         key_ip->tos = iph->tos;
542         key_ip->ttl = iph->ttl;
543 }
544
545 static void
546 __skb_flow_dissect_ipv6(const struct sk_buff *skb,
547                         struct flow_dissector *flow_dissector,
548                         void *target_container, void *data, const struct ipv6hdr *iph)
549 {
550         struct flow_dissector_key_ip *key_ip;
551
552         if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
553                 return;
554
555         key_ip = skb_flow_dissector_target(flow_dissector,
556                                            FLOW_DISSECTOR_KEY_IP,
557                                            target_container);
558         key_ip->tos = ipv6_get_dsfield(iph);
559         key_ip->ttl = iph->hop_limit;
560 }
561
562 /* Maximum number of protocol headers that can be parsed in
563  * __skb_flow_dissect
564  */
565 #define MAX_FLOW_DISSECT_HDRS   15
566
567 static bool skb_flow_dissect_allowed(int *num_hdrs)
568 {
569         ++*num_hdrs;
570
571         return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
572 }
573
574 /**
575  * __skb_flow_dissect - extract the flow_keys struct and return it
576  * @skb: sk_buff to extract the flow from, can be NULL if the remaining arguments are specified
577  * @flow_dissector: list of keys to dissect
578  * @target_container: target structure to put dissected values into
579  * @data: raw buffer pointer to the packet, if NULL use skb->data
580  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
581  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
582  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
583  *
584  * The function will try to retrieve individual keys into the target container
585  * specified by @flow_dissector, from either the skbuff or a raw buffer described
586  * by the remaining parameters.
587  *
588  * Caller must take care of zeroing target container memory.
589  */
590 bool __skb_flow_dissect(const struct sk_buff *skb,
591                         struct flow_dissector *flow_dissector,
592                         void *target_container,
593                         void *data, __be16 proto, int nhoff, int hlen,
594                         unsigned int flags)
595 {
596         struct flow_dissector_key_control *key_control;
597         struct flow_dissector_key_basic *key_basic;
598         struct flow_dissector_key_addrs *key_addrs;
599         struct flow_dissector_key_ports *key_ports;
600         struct flow_dissector_key_icmp *key_icmp;
601         struct flow_dissector_key_tags *key_tags;
602         struct flow_dissector_key_vlan *key_vlan;
603         enum flow_dissect_ret fdret;
604         enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
605         int num_hdrs = 0;
606         u8 ip_proto = 0;
607         bool ret;
608
609         if (!data) {
610                 data = skb->data;
611                 proto = skb_vlan_tag_present(skb) ?
612                          skb->vlan_proto : skb->protocol;
613                 nhoff = skb_network_offset(skb);
614                 hlen = skb_headlen(skb);
615 #if IS_ENABLED(CONFIG_NET_DSA)
616                 if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
617                         const struct dsa_device_ops *ops;
618                         int offset;
619
620                         ops = skb->dev->dsa_ptr->tag_ops;
621                         if (ops->flow_dissect &&
622                             !ops->flow_dissect(skb, &proto, &offset)) {
623                                 hlen -= offset;
624                                 nhoff += offset;
625                         }
626                 }
627 #endif
628         }
629
630         /* It is ensured by skb_flow_dissector_init() that the control key
631          * will always be present.
632          */
633         key_control = skb_flow_dissector_target(flow_dissector,
634                                                 FLOW_DISSECTOR_KEY_CONTROL,
635                                                 target_container);
636
637         /* It is ensured by skb_flow_dissector_init() that the basic key
638          * will always be present.
639          */
640         key_basic = skb_flow_dissector_target(flow_dissector,
641                                               FLOW_DISSECTOR_KEY_BASIC,
642                                               target_container);
643
644         if (dissector_uses_key(flow_dissector,
645                                FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
646                 struct ethhdr *eth = eth_hdr(skb);
647                 struct flow_dissector_key_eth_addrs *key_eth_addrs;
648
649                 key_eth_addrs = skb_flow_dissector_target(flow_dissector,
650                                                           FLOW_DISSECTOR_KEY_ETH_ADDRS,
651                                                           target_container);
652                 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
653         }
654
655 proto_again:
656         fdret = FLOW_DISSECT_RET_CONTINUE;
657
658         switch (proto) {
659         case htons(ETH_P_IP): {
660                 const struct iphdr *iph;
661                 struct iphdr _iph;
662
663                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
664                 if (!iph || iph->ihl < 5) {
665                         fdret = FLOW_DISSECT_RET_OUT_BAD;
666                         break;
667                 }
668
669                 nhoff += iph->ihl * 4;
670
671                 ip_proto = iph->protocol;
672
673                 if (dissector_uses_key(flow_dissector,
674                                        FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
675                         key_addrs = skb_flow_dissector_target(flow_dissector,
676                                                               FLOW_DISSECTOR_KEY_IPV4_ADDRS,
677                                                               target_container);
678
679                         memcpy(&key_addrs->v4addrs, &iph->saddr,
680                                sizeof(key_addrs->v4addrs));
681                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
682                 }
683
684                 if (ip_is_fragment(iph)) {
685                         key_control->flags |= FLOW_DIS_IS_FRAGMENT;
686
687                         if (iph->frag_off & htons(IP_OFFSET)) {
688                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
689                                 break;
690                         } else {
691                                 key_control->flags |= FLOW_DIS_FIRST_FRAG;
692                                 if (!(flags &
693                                       FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
694                                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
695                                         break;
696                                 }
697                         }
698                 }
699
700                 __skb_flow_dissect_ipv4(skb, flow_dissector,
701                                         target_container, data, iph);
702
703                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
704                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
705                         break;
706                 }
707
708                 break;
709         }
710         case htons(ETH_P_IPV6): {
711                 const struct ipv6hdr *iph;
712                 struct ipv6hdr _iph;
713
714                 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
715                 if (!iph) {
716                         fdret = FLOW_DISSECT_RET_OUT_BAD;
717                         break;
718                 }
719
720                 ip_proto = iph->nexthdr;
721                 nhoff += sizeof(struct ipv6hdr);
722
723                 if (dissector_uses_key(flow_dissector,
724                                        FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
725                         key_addrs = skb_flow_dissector_target(flow_dissector,
726                                                               FLOW_DISSECTOR_KEY_IPV6_ADDRS,
727                                                               target_container);
728
729                         memcpy(&key_addrs->v6addrs, &iph->saddr,
730                                sizeof(key_addrs->v6addrs));
731                         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
732                 }
733
734                 if ((dissector_uses_key(flow_dissector,
735                                         FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
736                      (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
737                     ip6_flowlabel(iph)) {
738                         __be32 flow_label = ip6_flowlabel(iph);
739
740                         if (dissector_uses_key(flow_dissector,
741                                                FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
742                                 key_tags = skb_flow_dissector_target(flow_dissector,
743                                                                      FLOW_DISSECTOR_KEY_FLOW_LABEL,
744                                                                      target_container);
745                                 key_tags->flow_label = ntohl(flow_label);
746                         }
747                         if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
748                                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
749                                 break;
750                         }
751                 }
752
753                 __skb_flow_dissect_ipv6(skb, flow_dissector,
754                                         target_container, data, iph);
755
756                 if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
757                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
758
759                 break;
760         }
761         case htons(ETH_P_8021AD):
762         case htons(ETH_P_8021Q): {
763                 const struct vlan_hdr *vlan = NULL;
764                 struct vlan_hdr _vlan;
765                 __be16 saved_vlan_tpid = proto;
766
767                 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
768                     skb && skb_vlan_tag_present(skb)) {
769                         proto = skb->protocol;
770                 } else {
771                         vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
772                                                     data, hlen, &_vlan);
773                         if (!vlan) {
774                                 fdret = FLOW_DISSECT_RET_OUT_BAD;
775                                 break;
776                         }
777
778                         proto = vlan->h_vlan_encapsulated_proto;
779                         nhoff += sizeof(*vlan);
780                 }
781
782                 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
783                         dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
784                 } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
785                         dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
786                 } else {
787                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
788                         break;
789                 }
790
791                 if (dissector_uses_key(flow_dissector, dissector_vlan)) {
792                         key_vlan = skb_flow_dissector_target(flow_dissector,
793                                                              dissector_vlan,
794                                                              target_container);
795
796                         if (!vlan) {
797                                 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
798                                 key_vlan->vlan_priority =
799                                         (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
800                         } else {
801                                 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
802                                         VLAN_VID_MASK;
803                                 key_vlan->vlan_priority =
804                                         (ntohs(vlan->h_vlan_TCI) &
805                                          VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
806                         }
807                         key_vlan->vlan_tpid = saved_vlan_tpid;
808                 }
809
810                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
811                 break;
812         }
813         case htons(ETH_P_PPP_SES): {
814                 struct {
815                         struct pppoe_hdr hdr;
816                         __be16 proto;
817                 } *hdr, _hdr;
818                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
819                 if (!hdr) {
820                         fdret = FLOW_DISSECT_RET_OUT_BAD;
821                         break;
822                 }
823
824                 proto = hdr->proto;
825                 nhoff += PPPOE_SES_HLEN;
826                 switch (proto) {
827                 case htons(PPP_IP):
828                         proto = htons(ETH_P_IP);
829                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
830                         break;
831                 case htons(PPP_IPV6):
832                         proto = htons(ETH_P_IPV6);
833                         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
834                         break;
835                 default:
836                         fdret = FLOW_DISSECT_RET_OUT_BAD;
837                         break;
838                 }
839                 break;
840         }
841         case htons(ETH_P_TIPC): {
842                 struct tipc_basic_hdr *hdr, _hdr;
843
844                 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
845                                            data, hlen, &_hdr);
846                 if (!hdr) {
847                         fdret = FLOW_DISSECT_RET_OUT_BAD;
848                         break;
849                 }
850
851                 if (dissector_uses_key(flow_dissector,
852                                        FLOW_DISSECTOR_KEY_TIPC)) {
853                         key_addrs = skb_flow_dissector_target(flow_dissector,
854                                                               FLOW_DISSECTOR_KEY_TIPC,
855                                                               target_container);
856                         key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
857                         key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
858                 }
859                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
860                 break;
861         }
862
863         case htons(ETH_P_MPLS_UC):
864         case htons(ETH_P_MPLS_MC):
865                 fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
866                                                 target_container, data,
867                                                 nhoff, hlen);
868                 break;
869         case htons(ETH_P_FCOE):
870                 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
871                         fdret = FLOW_DISSECT_RET_OUT_BAD;
872                         break;
873                 }
874
875                 nhoff += FCOE_HEADER_LEN;
876                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
877                 break;
878
879         case htons(ETH_P_ARP):
880         case htons(ETH_P_RARP):
881                 fdret = __skb_flow_dissect_arp(skb, flow_dissector,
882                                                target_container, data,
883                                                nhoff, hlen);
884                 break;
885
886         case htons(ETH_P_BATMAN):
887                 fdret = __skb_flow_dissect_batadv(skb, key_control, data,
888                                                   &proto, &nhoff, hlen, flags);
889                 break;
890
891         default:
892                 fdret = FLOW_DISSECT_RET_OUT_BAD;
893                 break;
894         }
895
896         /* Process result of proto processing */
897         switch (fdret) {
898         case FLOW_DISSECT_RET_OUT_GOOD:
899                 goto out_good;
900         case FLOW_DISSECT_RET_PROTO_AGAIN:
901                 if (skb_flow_dissect_allowed(&num_hdrs))
902                         goto proto_again;
903                 goto out_good;
904         case FLOW_DISSECT_RET_CONTINUE:
905         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
906                 break;
907         case FLOW_DISSECT_RET_OUT_BAD:
908         default:
909                 goto out_bad;
910         }
911
912 ip_proto_again:
913         fdret = FLOW_DISSECT_RET_CONTINUE;
914
915         switch (ip_proto) {
916         case IPPROTO_GRE:
917                 fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
918                                                target_container, data,
919                                                &proto, &nhoff, &hlen, flags);
920                 break;
921
922         case NEXTHDR_HOP:
923         case NEXTHDR_ROUTING:
924         case NEXTHDR_DEST: {
925                 u8 _opthdr[2], *opthdr;
926
927                 if (proto != htons(ETH_P_IPV6))
928                         break;
929
930                 opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
931                                               data, hlen, &_opthdr);
932                 if (!opthdr) {
933                         fdret = FLOW_DISSECT_RET_OUT_BAD;
934                         break;
935                 }
936
937                 ip_proto = opthdr[0];
938                 nhoff += (opthdr[1] + 1) << 3;
939
940                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
941                 break;
942         }
943         case NEXTHDR_FRAGMENT: {
944                 struct frag_hdr _fh, *fh;
945
946                 if (proto != htons(ETH_P_IPV6))
947                         break;
948
949                 fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
950                                           data, hlen, &_fh);
951
952                 if (!fh) {
953                         fdret = FLOW_DISSECT_RET_OUT_BAD;
954                         break;
955                 }
956
957                 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
958
959                 nhoff += sizeof(_fh);
960                 ip_proto = fh->nexthdr;
961
962                 if (!(fh->frag_off & htons(IP6_OFFSET))) {
963                         key_control->flags |= FLOW_DIS_FIRST_FRAG;
964                         if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
965                                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
966                                 break;
967                         }
968                 }
969
970                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
971                 break;
972         }
973         case IPPROTO_IPIP:
974                 proto = htons(ETH_P_IP);
975
976                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
977                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
978                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
979                         break;
980                 }
981
982                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
983                 break;
984
985         case IPPROTO_IPV6:
986                 proto = htons(ETH_P_IPV6);
987
988                 key_control->flags |= FLOW_DIS_ENCAPSULATION;
989                 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
990                         fdret = FLOW_DISSECT_RET_OUT_GOOD;
991                         break;
992                 }
993
994                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
995                 break;
996
997
998         case IPPROTO_MPLS:
999                 proto = htons(ETH_P_MPLS_UC);
1000                 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1001                 break;
1002
1003         case IPPROTO_TCP:
1004                 __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
1005                                        data, nhoff, hlen);
1006                 break;
1007
1008         default:
1009                 break;
1010         }
1011
1012         if (dissector_uses_key(flow_dissector,
1013                                FLOW_DISSECTOR_KEY_PORTS)) {
1014                 key_ports = skb_flow_dissector_target(flow_dissector,
1015                                                       FLOW_DISSECTOR_KEY_PORTS,
1016                                                       target_container);
1017                 key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
1018                                                         data, hlen);
1019         }
1020
1021         if (dissector_uses_key(flow_dissector,
1022                                FLOW_DISSECTOR_KEY_ICMP)) {
1023                 key_icmp = skb_flow_dissector_target(flow_dissector,
1024                                                      FLOW_DISSECTOR_KEY_ICMP,
1025                                                      target_container);
1026                 key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
1027         }
1028
1029         /* Process result of IP proto processing */
1030         switch (fdret) {
1031         case FLOW_DISSECT_RET_PROTO_AGAIN:
1032                 if (skb_flow_dissect_allowed(&num_hdrs))
1033                         goto proto_again;
1034                 break;
1035         case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1036                 if (skb_flow_dissect_allowed(&num_hdrs))
1037                         goto ip_proto_again;
1038                 break;
1039         case FLOW_DISSECT_RET_OUT_GOOD:
1040         case FLOW_DISSECT_RET_CONTINUE:
1041                 break;
1042         case FLOW_DISSECT_RET_OUT_BAD:
1043         default:
1044                 goto out_bad;
1045         }
1046
1047 out_good:
1048         ret = true;
1049
1050 out:
1051         key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1052         key_basic->n_proto = proto;
1053         key_basic->ip_proto = ip_proto;
1054
1055         return ret;
1056
1057 out_bad:
1058         ret = false;
1059         goto out;
1060 }
1061 EXPORT_SYMBOL(__skb_flow_dissect);
1062
1063 static u32 hashrnd __read_mostly;
1064 static __always_inline void __flow_hash_secret_init(void)
1065 {
1066         net_get_random_once(&hashrnd, sizeof(hashrnd));
1067 }
1068
1069 static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
1070                                              u32 keyval)
1071 {
1072         return jhash2(words, length, keyval);
1073 }
1074
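/* Hashing covers struct flow_keys from FLOW_KEYS_HASH_OFFSET onwards.  The
 * address union has to be the last member (enforced by the BUILD_BUG_ON()s
 * below) so that flow_keys_hash_length() can trim the hashed length down
 * to the address family actually in use.
 */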
1075 static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
1076 {
1077         const void *p = flow;
1078
1079         BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
1080         return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
1081 }
1082
1083 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1084 {
1085         size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
1086         BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
1087         BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
1088                      sizeof(*flow) - sizeof(flow->addrs));
1089
1090         switch (flow->control.addr_type) {
1091         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1092                 diff -= sizeof(flow->addrs.v4addrs);
1093                 break;
1094         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1095                 diff -= sizeof(flow->addrs.v6addrs);
1096                 break;
1097         case FLOW_DISSECTOR_KEY_TIPC:
1098                 diff -= sizeof(flow->addrs.tipckey);
1099                 break;
1100         }
1101         return (sizeof(*flow) - diff) / sizeof(u32);
1102 }
1103
1104 __be32 flow_get_u32_src(const struct flow_keys *flow)
1105 {
1106         switch (flow->control.addr_type) {
1107         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1108                 return flow->addrs.v4addrs.src;
1109         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1110                 return (__force __be32)ipv6_addr_hash(
1111                         &flow->addrs.v6addrs.src);
1112         case FLOW_DISSECTOR_KEY_TIPC:
1113                 return flow->addrs.tipckey.key;
1114         default:
1115                 return 0;
1116         }
1117 }
1118 EXPORT_SYMBOL(flow_get_u32_src);
1119
1120 __be32 flow_get_u32_dst(const struct flow_keys *flow)
1121 {
1122         switch (flow->control.addr_type) {
1123         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1124                 return flow->addrs.v4addrs.dst;
1125         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1126                 return (__force __be32)ipv6_addr_hash(
1127                         &flow->addrs.v6addrs.dst);
1128         default:
1129                 return 0;
1130         }
1131 }
1132 EXPORT_SYMBOL(flow_get_u32_dst);
1133
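/* Order addresses and ports canonically so that both directions of the
 * same flow (A:p -> B:q and B:q -> A:p) hash to the same value.
 */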
1134 static inline void __flow_hash_consistentify(struct flow_keys *keys)
1135 {
1136         int addr_diff, i;
1137
1138         switch (keys->control.addr_type) {
1139         case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1140                 addr_diff = (__force u32)keys->addrs.v4addrs.dst -
1141                             (__force u32)keys->addrs.v4addrs.src;
1142                 if ((addr_diff < 0) ||
1143                     (addr_diff == 0 &&
1144                      ((__force u16)keys->ports.dst <
1145                       (__force u16)keys->ports.src))) {
1146                         swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
1147                         swap(keys->ports.src, keys->ports.dst);
1148                 }
1149                 break;
1150         case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1151                 addr_diff = memcmp(&keys->addrs.v6addrs.dst,
1152                                    &keys->addrs.v6addrs.src,
1153                                    sizeof(keys->addrs.v6addrs.dst));
1154                 if ((addr_diff < 0) ||
1155                     (addr_diff == 0 &&
1156                      ((__force u16)keys->ports.dst <
1157                       (__force u16)keys->ports.src))) {
1158                         for (i = 0; i < 4; i++)
1159                                 swap(keys->addrs.v6addrs.src.s6_addr32[i],
1160                                      keys->addrs.v6addrs.dst.s6_addr32[i]);
1161                         swap(keys->ports.src, keys->ports.dst);
1162                 }
1163                 break;
1164         }
1165 }
1166
1167 static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
1168 {
1169         u32 hash;
1170
1171         __flow_hash_consistentify(keys);
1172
1173         hash = __flow_hash_words(flow_keys_hash_start(keys),
1174                                  flow_keys_hash_length(keys), keyval);
1175         if (!hash)
1176                 hash = 1;
1177
1178         return hash;
1179 }
1180
1181 u32 flow_hash_from_keys(struct flow_keys *keys)
1182 {
1183         __flow_hash_secret_init();
1184         return __flow_hash_from_keys(keys, hashrnd);
1185 }
1186 EXPORT_SYMBOL(flow_hash_from_keys);
1187
1188 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1189                                   struct flow_keys *keys, u32 keyval)
1190 {
1191         skb_flow_dissect_flow_keys(skb, keys,
1192                                    FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1193
1194         return __flow_hash_from_keys(keys, keyval);
1195 }
1196
1197 struct _flow_keys_digest_data {
1198         __be16  n_proto;
1199         u8      ip_proto;
1200         u8      padding;
1201         __be32  ports;
1202         __be32  src;
1203         __be32  dst;
1204 };
1205
1206 void make_flow_keys_digest(struct flow_keys_digest *digest,
1207                            const struct flow_keys *flow)
1208 {
1209         struct _flow_keys_digest_data *data =
1210             (struct _flow_keys_digest_data *)digest;
1211
1212         BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1213
1214         memset(digest, 0, sizeof(*digest));
1215
1216         data->n_proto = flow->basic.n_proto;
1217         data->ip_proto = flow->basic.ip_proto;
1218         data->ports = flow->ports.ports;
1219         data->src = flow->addrs.v4addrs.src;
1220         data->dst = flow->addrs.v4addrs.dst;
1221 }
1222 EXPORT_SYMBOL(make_flow_keys_digest);
1223
1224 static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1225
1226 u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1227 {
1228         struct flow_keys keys;
1229
1230         __flow_hash_secret_init();
1231
1232         memset(&keys, 0, sizeof(keys));
1233         __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
1234                            NULL, 0, 0, 0,
1235                            FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1236
1237         return __flow_hash_from_keys(&keys, hashrnd);
1238 }
1239 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1240
1241 /**
1242  * __skb_get_hash: calculate a flow hash
1243  * @skb: sk_buff to calculate flow hash from
1244  *
1245  * This function calculates a flow hash based on src/dst addresses
1246  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
1247  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
1248  * if hash is a canonical 4-tuple hash over transport ports.
1249  */
1250 void __skb_get_hash(struct sk_buff *skb)
1251 {
1252         struct flow_keys keys;
1253         u32 hash;
1254
1255         __flow_hash_secret_init();
1256
1257         hash = ___skb_get_hash(skb, &keys, hashrnd);
1258
1259         __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1260 }
1261 EXPORT_SYMBOL(__skb_get_hash);
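
/* Most users go through the skb_get_hash() wrapper in <linux/skbuff.h>,
 * which only falls back to this software dissection when the skb does not
 * already carry a valid hash, roughly:
 *
 *	if (!skb->l4_hash && !skb->sw_hash)
 *		__skb_get_hash(skb);
 *	return skb->hash;
 */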
1262
1263 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
1264 {
1265         struct flow_keys keys;
1266
1267         return ___skb_get_hash(skb, &keys, perturb);
1268 }
1269 EXPORT_SYMBOL(skb_get_hash_perturb);
1270
1271 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1272                    const struct flow_keys_basic *keys, int hlen)
1273 {
1274         u32 poff = keys->control.thoff;
1275
1276         /* skip L4 headers for fragments after the first */
1277         if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1278             !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1279                 return poff;
1280
1281         switch (keys->basic.ip_proto) {
1282         case IPPROTO_TCP: {
1283                 /* access doff as u8 to avoid unaligned access */
1284                 const u8 *doff;
1285                 u8 _doff;
1286
1287                 doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1288                                             data, hlen, &_doff);
1289                 if (!doff)
1290                         return poff;
1291
1292                 poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
1293                 break;
1294         }
1295         case IPPROTO_UDP:
1296         case IPPROTO_UDPLITE:
1297                 poff += sizeof(struct udphdr);
1298                 break;
1299         /* For the rest, we do not really care about header
1300          * extensions at this point for now.
1301          */
1302         case IPPROTO_ICMP:
1303                 poff += sizeof(struct icmphdr);
1304                 break;
1305         case IPPROTO_ICMPV6:
1306                 poff += sizeof(struct icmp6hdr);
1307                 break;
1308         case IPPROTO_IGMP:
1309                 poff += sizeof(struct igmphdr);
1310                 break;
1311         case IPPROTO_DCCP:
1312                 poff += sizeof(struct dccp_hdr);
1313                 break;
1314         case IPPROTO_SCTP:
1315                 poff += sizeof(struct sctphdr);
1316                 break;
1317         }
1318
1319         return poff;
1320 }
1321
1322 /**
1323  * skb_get_poff - get the offset to the payload
1324  * @skb: sk_buff to get the payload offset from
1325  *
1326  * The function will get the offset to the payload as far as it could
1327  * be dissected.  The main user is currently BPF, so that we can dynamically
1328  * truncate packets without needing to push the actual payload to user
1329  * space, and can instead analyze headers only.
1330  */
1331 u32 skb_get_poff(const struct sk_buff *skb)
1332 {
1333         struct flow_keys_basic keys;
1334
1335         if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
1336                 return 0;
1337
1338         return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1339 }
1340
1341 __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1342 {
1343         memset(keys, 0, sizeof(*keys));
1344
1345         memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1346             sizeof(keys->addrs.v6addrs.src));
1347         memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1348             sizeof(keys->addrs.v6addrs.dst));
1349         keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1350         keys->ports.src = fl6->fl6_sport;
1351         keys->ports.dst = fl6->fl6_dport;
1352         keys->keyid.keyid = fl6->fl6_gre_key;
1353         keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
1354         keys->basic.ip_proto = fl6->flowi6_proto;
1355
1356         return flow_hash_from_keys(keys);
1357 }
1358 EXPORT_SYMBOL(__get_hash_from_flowi6);
1359
1360 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1361         {
1362                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1363                 .offset = offsetof(struct flow_keys, control),
1364         },
1365         {
1366                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1367                 .offset = offsetof(struct flow_keys, basic),
1368         },
1369         {
1370                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1371                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1372         },
1373         {
1374                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1375                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1376         },
1377         {
1378                 .key_id = FLOW_DISSECTOR_KEY_TIPC,
1379                 .offset = offsetof(struct flow_keys, addrs.tipckey),
1380         },
1381         {
1382                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1383                 .offset = offsetof(struct flow_keys, ports),
1384         },
1385         {
1386                 .key_id = FLOW_DISSECTOR_KEY_VLAN,
1387                 .offset = offsetof(struct flow_keys, vlan),
1388         },
1389         {
1390                 .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1391                 .offset = offsetof(struct flow_keys, tags),
1392         },
1393         {
1394                 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1395                 .offset = offsetof(struct flow_keys, keyid),
1396         },
1397 };
1398
1399 static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1400         {
1401                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1402                 .offset = offsetof(struct flow_keys, control),
1403         },
1404         {
1405                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1406                 .offset = offsetof(struct flow_keys, basic),
1407         },
1408         {
1409                 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1410                 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1411         },
1412         {
1413                 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1414                 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1415         },
1416         {
1417                 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1418                 .offset = offsetof(struct flow_keys, ports),
1419         },
1420 };
1421
1422 static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
1423         {
1424                 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1425                 .offset = offsetof(struct flow_keys, control),
1426         },
1427         {
1428                 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1429                 .offset = offsetof(struct flow_keys, basic),
1430         },
1431 };
1432
1433 struct flow_dissector flow_keys_dissector __read_mostly;
1434 EXPORT_SYMBOL(flow_keys_dissector);
1435
1436 struct flow_dissector flow_keys_basic_dissector __read_mostly;
1437 EXPORT_SYMBOL(flow_keys_basic_dissector);
1438
1439 static int __init init_default_flow_dissectors(void)
1440 {
1441         skb_flow_dissector_init(&flow_keys_dissector,
1442                                 flow_keys_dissector_keys,
1443                                 ARRAY_SIZE(flow_keys_dissector_keys));
1444         skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1445                                 flow_keys_dissector_symmetric_keys,
1446                                 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
1447         skb_flow_dissector_init(&flow_keys_basic_dissector,
1448                                 flow_keys_basic_dissector_keys,
1449                                 ARRAY_SIZE(flow_keys_basic_dissector_keys));
1450         return 0;
1451 }
1452
1453 core_initcall(init_default_flow_dissectors);