// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

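/* Build the metadata/TCI layer of the match key: record which key
 * layers are present and, if the rule matches on VLAN, encode the
 * priority and VLAN ID into the TCI field of both key and mask.
 */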
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct tc_cls_flower_offload *flow, u8 key_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}

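/* Write the extended metadata layer, which carries the second
 * key-layer bitmap when layers beyond the first bitmap are in use.
 */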
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

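/* Encode the ingress port. The mask version matches any port, tunnel
 * rules use the tunnel port type, and otherwise the representor's
 * control-message port ID is written.
 */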
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port)
			return -EOPNOTSUPP;
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

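/* Build the MAC/MPLS layer: copy Ethernet source/destination
 * addresses and, when present, pack the MPLS label, TC and
 * bottom-of-stack bits into a single label-stack-entry word.
 */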
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}
}

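/* Build the transport-port layer from the L4 source and destination
 * ports of the rule, for both key and mask.
 */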
static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

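/* Fill the shared IP extension fields (protocol, TOS, TTL, TCP flags
 * and fragmentation flags) used by both the IPv4 and IPv6 layers.
 */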
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk,
			  struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

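/* Build the IPv4 layer: source/destination addresses plus the common
 * IP extension fields.
 */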
static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

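/* Build the IPv6 layer: source/destination addresses plus the common
 * IP extension fields.
 */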
static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

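/* Copy the raw tunnel option data (key and mask) for Geneve options
 * straight into the match buffers.
 */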
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
			      struct tc_cls_flower_offload *flow)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(flow->rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

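/* Build the IPv4 UDP tunnel layer: tunnel key ID (VNI), outer IPv4
 * addresses and outer IP TOS/TTL, for both key and mask.
 */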
static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 temp_vni;

		flow_rule_match_enc_keyid(rule, &match);
		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		ext->tun_id = cpu_to_be32(temp_vni);
		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		msk->tun_id = cpu_to_be32(temp_vni);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->ip_src = match.key->src;
		ext->ip_dst = match.key->dst;
		msk->ip_src = match.mask->src;
		msk->ip_dst = match.mask->dst;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}

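/* Compile a TC flower rule into the flower match-key layout. The
 * unmasked and mask buffers are walked layer by layer in the order
 * given by key_ls, with each helper advancing the ext and msk
 * pointers by the size of the layer it has written.
 */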
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type)
{
	u32 cmsg_port = 0;
	int err;
	u8 *ext;
	u8 *msk;

	if (nfp_netdev_is_nfp_repr(netdev))
		cmsg_port = nfp_repr_get_port_id(netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      cmsg_port, false, tun_type);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      cmsg_port, true, tun_type);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       (struct nfp_flower_mac_mpls *)msk,
				       flow);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 flow);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, flow);