/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

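/* Build the metadata and VLAN TCI section of the match key.  The
 * @mask_version flag selects whether the exact key or the mask half
 * of @flow is compiled into @frame.
 */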
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                            struct tc_cls_flower_offload *flow, u8 key_type,
                            bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_vlan *flow_vlan;
        u16 tmp_tci;

        memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
        /* Populate the metadata frame. */
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = ~0;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                flow_vlan = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
                if (flow_vlan->vlan_id) {
                        tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                             flow_vlan->vlan_priority) |
                                  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                             flow_vlan->vlan_id) |
                                  NFP_FLOWER_MASK_VLAN_CFI;
                        frame->tci = cpu_to_be16(tmp_tci);
                }
        }
}

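/* Record the second key layer bitmap in the extended metadata section. */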
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

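/* Fill the input port section.  The mask is always exact (~0); for the
 * key, tunnel flows carry the tunnel type instead of the cmsg port ID.
 */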
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type)
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        else
                frame->in_port = cpu_to_be32(cmsg_port);

        return 0;
}

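/* Build the MAC address and MPLS section of the match key. */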
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                       struct tc_cls_flower_offload *flow,
                       bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_eth_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                 target);
                /* Populate mac frame. */
                ether_addr_copy(frame->mac_dst, &addr->dst[0]);
                ether_addr_copy(frame->mac_src, &addr->src[0]);
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_dissector_key_mpls *mpls;
                u32 t_mpls;

                mpls = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_MPLS,
                                                 target);

                t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
                         FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
                         NFP_FLOWER_MASK_MPLS_Q;

                frame->mpls_lse = cpu_to_be32(t_mpls);
        }
}

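/* Build the transport layer (L4) port section of the match key. */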
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
                         struct tc_cls_flower_offload *flow,
                         bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ports *tp;

        memset(frame, 0, sizeof(struct nfp_flower_tp_ports));

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                tp = skb_flow_dissector_target(flow->dissector,
                                               FLOW_DISSECTOR_KEY_PORTS,
                                               target);
                frame->port_src = tp->src;
                frame->port_dst = tp->dst;
        }
}

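/* Fill the extended IP fields shared by IPv4 and IPv6: protocol,
 * TOS/TTL, TCP flags and fragmentation flags.
 */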
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
                          struct tc_cls_flower_offload *flow,
                          bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *basic;

                basic = skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  target);
                frame->proto = basic->ip_proto;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_dissector_key_ip *flow_ip;

                flow_ip = skb_flow_dissector_target(flow->dissector,
                                                    FLOW_DISSECTOR_KEY_IP,
                                                    target);
                frame->tos = flow_ip->tos;
                frame->ttl = flow_ip->ttl;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_dissector_key_tcp *tcp;
                u32 tcp_flags;

                tcp = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_TCP, target);
                tcp_flags = be16_to_cpu(tcp->flags);

                if (tcp_flags & TCPHDR_FIN)
                        frame->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags & TCPHDR_SYN)
                        frame->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags & TCPHDR_RST)
                        frame->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags & TCPHDR_PSH)
                        frame->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags & TCPHDR_URG)
                        frame->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key;

                key = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_CONTROL,
                                                target);
                if (key->flags & FLOW_DIS_IS_FRAGMENT)
                        frame->flags |= NFP_FL_IP_FRAGMENTED;
                if (key->flags & FLOW_DIS_FIRST_FRAG)
                        frame->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}

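/* Build the IPv4 address section, then the shared IP extension fields. */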
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                 target);
                frame->ipv4_src = addr->src;
                frame->ipv4_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

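/* Build the IPv6 address section, then the shared IP extension fields. */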
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
                        struct tc_cls_flower_offload *flow,
                        bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv6_addrs *addr;

        memset(frame, 0, sizeof(struct nfp_flower_ipv6));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                addr = skb_flow_dissector_target(flow->dissector,
                                                 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                 target);
                frame->ipv6_src = addr->src;
                frame->ipv6_dst = addr->dst;
        }

        nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}

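/* Build the IPv4 UDP tunnel section: VNI/key ID and outer IP addresses
 * for VXLAN and Geneve matches.
 */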
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
                                struct tc_cls_flower_offload *flow,
                                bool mask_version)
{
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_ipv4_addrs *tun_ips;
        struct flow_dissector_key_keyid *vni;

        memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                u32 temp_vni;

                vni = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                target);
                temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
                frame->tun_id = cpu_to_be32(temp_vni);
        }

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                tun_ips =
                   skb_flow_dissector_target(flow->dissector,
                                             FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                             target);
                frame->ip_src = tun_ips->src;
                frame->ip_dst = tun_ips->dst;
        }
}

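/* Compile the TC flower match into its firmware representation: each
 * present key layer is written twice, once as the exact (unmasked) key
 * and once as the mask, with @ext and @msk walking the two buffers in
 * lockstep.  Returns 0 on success or a negative error code.
 */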
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type)
{
        struct nfp_repr *netdev_repr;
        int err;
        u8 *ext;
        u8 *msk;

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        /* Populate Exact Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    flow, key_ls->key_layer, false);
        /* Populate Mask Metadata. */
        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
                                    flow, key_ls->key_layer, true);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate Extended Metadata if Required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      nfp_repr_get_port_id(netdev),
                                      false, tun_type);
        if (err)
                return err;

        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      nfp_repr_get_port_id(netdev),
                                      true, tun_type);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                /* Populate Exact MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
                                       flow, false);
                /* Populate Mask MAC Data. */
                nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
                                       flow, true);
                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                /* Populate Exact TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         flow, false);
                /* Populate Mask TP Data. */
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
                                         flow, true);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                /* Populate Exact IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        flow, false);
                /* Populate Mask IPv4 Data. */
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                /* Populate Exact IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        flow, false);
                /* Populate Mask IPv6 Data. */
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
                                        flow, true);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                __be32 tun_dst;

                /* Populate Exact Tunnel Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
                /* Populate Mask Tunnel Data. */
                nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
                tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
                ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                /* Configure tunnel end point MAC. */
                if (nfp_netdev_is_nfp_repr(netdev)) {
                        netdev_repr = netdev_priv(netdev);
                        nfp_tunnel_write_macs(netdev_repr->app);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = tun_dst;
                        nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
                }
        }

        return 0;
}