/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/bitmap.h>

#include "bcm_sf2_regs.h"
struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];

static const u8 zero_slice[UDFS_PER_SLICE] = { };
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
				/* End of L2, byte offset 12, src IP[0:15] */
				/* End of L2, byte offset 14, src IP[16:31] */
				/* End of L2, byte offset 16, dst IP[0:15] */
				/* End of L2, byte offset 18, dst IP[16:31] */
				/* End of L3, byte offset 0, src port */
				/* End of L3, byte offset 2, dst port */
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
				/* End of L2, byte offset 8, src IP[0:15] */
				/* End of L2, byte offset 10, src IP[16:31] */
				/* End of L2, byte offset 12, src IP[32:47] */
				/* End of L2, byte offset 14, src IP[48:63] */
				/* End of L2, byte offset 16, src IP[64:79] */
				/* End of L2, byte offset 18, src IP[80:95] */
				/* End of L2, byte offset 20, src IP[96:111] */
				/* End of L2, byte offset 22, src IP[112:127] */
				/* End of L3, byte offset 0, src port */
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
				/* End of L2, byte offset 24, dst IP[0:15] */
				/* End of L2, byte offset 26, dst IP[16:31] */
				/* End of L2, byte offset 28, dst IP[32:47] */
				/* End of L2, byte offset 30, dst IP[48:63] */
				/* End of L2, byte offset 32, dst IP[64:79] */
				/* End of L2, byte offset 34, dst IP[80:95] */
				/* End of L2, byte offset 36, dst IP[96:111] */
				/* End of L2, byte offset 38, dst IP[112:127] */
				/* End of L3, byte offset 2, dst port */
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {

static inline u32 udf_upper_bits(unsigned int num_udf)
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);

static inline u32 udf_lower_bits(unsigned int num_udf)
	return (u8)GENMASK(num_udf - 1, 0);
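/* The two helpers above split a slice's UDF valid mask in two: the bits for
 * the first eight UDFs (udf_lower_bits()) are programmed into bits [31:24]
 * of CFP data/mask word 5, while the remaining bit(s) (udf_upper_bits()) end
 * up in the low bits of word 6, next to the L3 framing, IP protocol and
 * fragment fields.
 */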
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
	u32 offset = layout->udfs[slice_num].base_offset;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
	unsigned int timeout = 1000;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
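/* Locations exposed to user-space therefore run from 1 to
 * num_cfp_rules - 1.  Keeping entry #0 unused also guarantees that a
 * CHAIN_ID of zero can never belong to a valid chained (IPv6) rule, which
 * bcm_sf2_cfp_rule_get() relies on to tell IPv4 and IPv6 entries apart.
 */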
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   unsigned int port_num,
				   unsigned int queue_num,
	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	reg = CHANGE_FWRD_MAP_IB_REP_ARL |
	      BIT(port_num + DST_MAP_IB_SHIFT) |
	      CHANGE_TC | queue_num << NEW_TC_SHIFT;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
		pr_err("Policer entry at %d failed\n", rule_index);

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
		pr_err("Meter entry at %d failed\n", rule_index);
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct ethtool_tcpip4_spec *v4_spec,
				   unsigned int slice_num,
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	reg = be16_to_cpu(v4_spec->pdst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
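/* Note on the packing above: the destination port, source port, destination
 * IP and source IP are spread across CFP data/mask words 4..0 in 8- and
 * 16-bit chunks that line up with the 16-bit UDFs programmed by
 * bcm_sf2_cfp_udf_set(), and word 0 additionally carries
 * SLICE_NUM()/SLICE_VALID so the entry is tied to the UDF slice in use.
 */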
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	u8 ip_proto, ip_frag;
	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;
	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
	/* S-Tag status [31:30]
	 * C-Tag status [29:28]
	 */
	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0] [31:24]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
		pr_err("TCAM entry at addr %d failed\n", rule_index);

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
	u32 reg, tmp, val, offset;

	/* UDF_n_B8 [23:8] (port)
	 * UDF_n_B7 (upper) [7:0] (addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower) [31:24] (addr[7:0])
	 * UDF_n_B6 [23:8] (addr[31:16])
	 * UDF_n_B5 (upper) [7:0] (addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower) [31:24] (addr[39:32])
	 * UDF_n_B4 [23:8] (addr[63:48])
	 * UDF_n_B3 (upper) [7:0] (addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower) [31:24] (addr[71:64])
	 * UDF_n_B2 [23:8] (addr[95:80])
	 * UDF_n_B1 (upper) [7:0] (addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower) [31:24] (addr[103:96])
	 * UDF_n_B0 [23:8] (addr[127:112])
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
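/* As with the IPv4 case, the 128-bit address plus the 16-bit port are packed
 * into data/mask words 4..0 in 24-bit chunks: each UDF that straddles a word
 * boundary has its upper byte in bits [7:0] of one word and its lower byte in
 * bits [31:24] of the next, and word 0 again ends in SLICE_NUM()/SLICE_VALID.
 * bcm_sf2_cfp_unslice_ipv6() undoes this packing when a rule is read back.
 */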
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	u8 ip_proto, ip_frag;
	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		v6_spec = &fs->h_u.tcp_ip6_spec;
		v6_m_spec = &fs->m_u.tcp_ip6_spec;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		v6_spec = &fs->h_u.udp_ip6_spec;
		v6_m_spec = &fs->m_u.udp_ip6_spec;
		break;
	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking at whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status [31:30]
	 * C-Tag status [29:28]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets, including
	 * the upper UDF valid bits.
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0] [31:24]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
			       slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
			       SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));
	/* CHAIN ID [31:24] chain to previous slice
	 * UDF_Valid[11:8] [19:16]
	 * UDF_Valid[7:0] [15:8]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
			       false);
	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
			       SLICE_NUM_MASK, true);
	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];
	return 0;

out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int queue_num, port_num;
	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
	    !dsa_is_user_port(ds, port_num) ||
	    port_num >= priv->hw_params.num_ports)

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
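	/* The ring_cookie passed in by ethtool encodes the destination as
	 * port * SF2_NUM_EGRESS_QUEUES + queue, so the division above yields
	 * the egress port and the modulo the egress queue; the same mapping
	 * is reversed in bcm_sf2_cfp_rule_get().
	 */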
	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of the chained halves still in
	 * the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);

	/* If this was an IPv6 rule, delete its companion rule too */
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
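/* XOR-ing every mask byte with 0xff above flips each mask between the two
 * complementary conventions ("bits set = must match" vs. "bits set = don't
 * care"); it is applied to the flow spec reported back to user-space in
 * bcm_sf2_cfp_rule_get().
 */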
static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
				    struct ethtool_tcpip4_spec *v4_spec,
	u32 reg, offset, ipv4;
	u16 src_dst_port;

	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);

	reg = core_readl(priv, offset);
	/* dst port [15:8] */
	src_dst_port = reg << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);

	reg = core_readl(priv, offset);
	/* dst port [7:0] */
	src_dst_port |= (reg >> 24);

	v4_spec->pdst = cpu_to_be16(src_dst_port);
	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));

	/* IPv4 dst [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);

	reg = core_readl(priv, offset);
	/* IPv4 dst [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	/* IPv4 dst [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	v4_spec->ip4dst = cpu_to_be32(ipv4);

	/* IPv4 src [15:8] */
	ipv4 = (reg & 0xff) << 8;

	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);

	/* Once the TCAM is programmed, the mask reflects the slice number
	 * being matched, don't bother checking it when reading back the
	 * mask.
	 */
	if (!mask && !(reg & SLICE_VALID))

	/* IPv4 src [7:0] */
	ipv4 |= (reg >> 24) & 0xff;
	/* IPv4 src [31:16] */
	ipv4 |= ((reg >> 8) & 0xffff) << 16;
	v4_spec->ip4src = cpu_to_be32(ipv4);
static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs)
	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V4_FLOW;
		v4_spec = &fs->h_u.tcp_ip4_spec;
		v4_m_spec = &fs->m_u.tcp_ip4_spec;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V4_FLOW;
		v4_spec = &fs->h_u.udp_ip4_spec;
		v4_m_spec = &fs->m_u.udp_ip4_spec;
		break;

	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;

	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);

	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
				    __be32 *ip6_addr, __be16 *port,
	u32 reg, tmp, offset;

	/* UDF_n_B8 [23:8] (port)
	 * UDF_n_B7 (upper) [7:0] (addr[15:8])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	reg = core_readl(priv, offset);
	*port = cpu_to_be16(reg >> 8);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B7 (lower) [31:24] (addr[7:0])
	 * UDF_n_B6 [23:8] (addr[31:16])
	 * UDF_n_B5 (upper) [7:0] (addr[47:40])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[3] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B5 (lower) [31:24] (addr[39:32])
	 * UDF_n_B4 [23:8] (addr[63:48])
	 * UDF_n_B3 (upper) [7:0] (addr[79:72])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[2] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B3 (lower) [31:24] (addr[71:64])
	 * UDF_n_B2 [23:8] (addr[95:80])
	 * UDF_n_B1 (upper) [7:0] (addr[111:104])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[1] = cpu_to_be32(tmp);
	tmp = (u32)(reg & 0xff) << 8;

	/* UDF_n_B1 (lower) [31:24] (addr[103:96])
	 * UDF_n_B0 [23:8] (addr[127:112])
	 */
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	reg = core_readl(priv, offset);
	tmp |= (reg >> 24) & 0xff;
	tmp |= (u32)((reg >> 8) << 16);
	ip6_addr[0] = cpu_to_be32(tmp);

	if (!mask && !(reg & SLICE_VALID))
static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
				     struct ethtool_rx_flow_spec *fs,
	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;

	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
	 * using tcp_ip6_spec here, since it is part of a union.
	 */
	v6_spec = &fs->h_u.tcp_ip6_spec;
	v6_m_spec = &fs->m_u.tcp_ip6_spec;

	/* Read the second half first */
	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
				       false);

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
				       &v6_m_spec->pdst, true);

	/* Read last to avoid next entry clobbering the results during search
	 * operations. We would not have the port enabled for this rule, so
	 * don't bother checking it.
	 */
	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));

	/* The slice number is valid, so read the rule we are chained from now
	 * which is our first half.
	 */
	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);

	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
	case IPPROTO_TCP:
		fs->flow_type = TCP_V6_FLOW;
		break;
	case IPPROTO_UDP:
		fs->flow_type = UDP_V6_FLOW;
		break;

	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
				       false);

	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
					&v6_m_spec->psrc, true);
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
	u32 reg, ipv4_or_chain_id;
	unsigned int queue_num;

	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);

	reg = core_readl(priv, CORE_ACT_POL_DATA0);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);

	/* Extract the destination port */
	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
				  DST_MAP_IB_MASK) - 1;

	/* There is no Port 6, so we compensate for that here */
	if (nfc->fs.ring_cookie >= 6)
		nfc->fs.ring_cookie++;
	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;

	/* Extract the destination queue */
	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
	nfc->fs.ring_cookie += queue_num;

	/* Extract the L3_FRAMING or CHAIN_ID */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));

	/* With IPv6 rules this would contain a non-zero chain ID since
	 * we reserve entry 0 and it cannot be used. So if we read 0 here
	 * this means an IPv4 rule.
	 */
	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
	if (ipv4_or_chain_id == 0)
		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
	else
		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
						ipv4_or_chain_id);

	/* Read last to avoid next entry clobbering the results during search
	 * operations.
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
	if (!(reg & 1 << port))

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
/* We implement the search doing a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;
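	/* Note that only bits set in priv->cfp.unique were walked above, so
	 * the first half of a chained IPv6 entry is never reported on its
	 * own: user-space only sees the second half, which was flagged as
	 * unique when the rule was installed.
	 */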
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;

	mutex_unlock(&priv->cfp.lock);
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;

	mutex_unlock(&priv->cfp.lock);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
	unsigned int timeout = 1000;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;
	} while (timeout--);