dpaa2-eth: Add flow steering support without masking
author Ioana Ciocoi Radulescu <ruxandra.radulescu@nxp.com>
Tue, 16 Apr 2019 17:13:30 +0000 (17:13 +0000)
committer David S. Miller <davem@davemloft.net>
Wed, 17 Apr 2019 04:46:19 +0000 (21:46 -0700)
On platforms that lack a TCAM (like LS1088A), masking of
flow steering keys is not supported. Until now we didn't
offer flow steering capabilities at all on these platforms,
since our driver implementation configured a "comprehensive"
FS key (containing all supported header fields), with masks
used to ignore the fields not present in the rules provided
by the user.

We now allow ethtool rules that share a common key (i.e. have
the same header fields). The FS key is now kept in the driver
private data and initialized when the first rule is added to
an empty classification table, rather than at probe time. If a rule
with a different key composition is needed, the user must first
manually delete all existing rules.
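
For illustration, a hypothetical ethtool session (interface name,
queue numbers and rule locations made up): the first two rules share
the same key composition (ethertype + IP proto + L4 destination port)
and are accepted, while the third implies a different composition and
is rejected until the table is emptied:

    ethtool -N eth0 flow-type udp4 dst-port 5000 action 1 loc 0
    ethtool -N eth0 flow-type udp4 dst-port 6000 action 2 loc 1
    ethtool -N eth0 flow-type udp4 src-ip 10.0.0.1 action 3 loc 2
    ethtool -N eth0 delete 0
    ethtool -N eth0 delete 1

With the table empty again, the next rule added reprograms the FS key
with its own field composition.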

When building a FS table entry to pass to firmware, we still use
the old building algorithm, which assumes an all-supported-fields
key, and then collapse the key by dropping the fields that aren't
actually needed.
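
A minimal standalone userspace sketch of this collapse step
(illustration only: the field IDs, sizes and sample key bytes are
made up, and trim() merely mirrors the idea behind the
dpaa2_eth_cls_trim_rule() helper added below):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct field { uint64_t id; int size; };

  /* made-up field layout, in the fixed order used to build the key */
  static const struct field fields[] = {
      { 1ULL << 0, 2 },   /* ethertype */
      { 1ULL << 1, 4 },   /* IP src */
      { 1ULL << 2, 4 },   /* IP dst */
      { 1ULL << 3, 2 },   /* L4 dst port */
  };

  /* walk the all-fields layout and copy forward only the fields
   * selected in 'wanted'; memmove() because source and destination
   * regions can overlap
   */
  static void trim(uint8_t *key, uint64_t wanted)
  {
      int off = 0, new_off = 0;
      size_t i;

      for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
          if (fields[i].id & wanted) {
              memmove(key + new_off, key + off, fields[i].size);
              new_off += fields[i].size;
          }
          off += fields[i].size;
      }
  }

  int main(void)
  {
      uint8_t key[12] = { 0x08, 0x00,    /* ethertype 0x0800 */
                          10, 0, 0, 1,   /* IP src 10.0.0.1 */
                          10, 0, 0, 2,   /* IP dst 10.0.0.2 */
                          0x13, 0x88 };  /* L4 dst port 5000 */
      int i;

      trim(key, (1ULL << 1) | (1ULL << 3));  /* keep IP src + L4 dst */

      for (i = 0; i < 6; i++)   /* prints: 0a 00 00 01 13 88 */
          printf("%02x ", key[i]);
      printf("\n");
      return 0;
  }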

Masked rules are not supported; if a mask is provided, its value
is ignored. On firmware versions older than MC10.7.0 (which only
offer the legacy ABIs for configuring distribution keys), flow
steering without masking remains unavailable.
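
As a hypothetical example (interface name made up), a mask given via
ethtool's 'm' option is accepted but has no effect on these
platforms; the rule still matches on the full field value:

    ethtool -N eth0 flow-type udp4 dst-port 5000 m 0x00ff action 1 loc 0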

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 828bca2f32b6a02b6b786a6b9a962e12838877c0..63b1ecc18c26f7833988394835ee482a844ff6b8 100644
@@ -2692,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
 }
 
 /* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
 {
        int i, size = 0;
 
-       for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               if (!(fields & dist_fields[i].id))
+                       continue;
                size += dist_fields[i].size;
+       }
 
        return size;
 }
@@ -2718,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
        return 0;
 }
 
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+       int off = 0, new_off = 0;
+       int i, size;
+
+       for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+               size = dist_fields[i].size;
+               if (dist_fields[i].id & fields) {
+                       memcpy(key_mem + new_off, key_mem + off, size);
+                       new_off += size;
+               }
+               off += size;
+       }
+}
+
 /* Set Rx distribution (hash or flow classification) key
  * flags is a combination of RXH_ bits
  */
@@ -2739,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
                struct dpkg_extract *key =
                        &cls_cfg.extracts[cls_cfg.num_extracts];
 
-               /* For Rx hashing key we set only the selected fields.
-                * For Rx flow classification key we set all supported fields
+               /* For both Rx hashing and classification keys
+                * we set only the selected fields.
                 */
-               if (type == DPAA2_ETH_RX_DIST_HASH) {
-                       if (!(flags & dist_fields[i].id))
-                               continue;
+               if (!(flags & dist_fields[i].id))
+                       continue;
+               if (type == DPAA2_ETH_RX_DIST_HASH)
                        rx_hash_fields |= dist_fields[i].rxnfc_field;
-               }
 
                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2814,7 +2834,12 @@ int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
        return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
 }
 
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+       return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
+}
+
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
 {
        struct device *dev = priv->net_dev->dev.parent;
        int err;
@@ -2825,7 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
                return -EOPNOTSUPP;
        }
 
-       if (!dpaa2_eth_fs_enabled(priv) || !dpaa2_eth_fs_mask_enabled(priv)) {
+       if (!dpaa2_eth_fs_enabled(priv)) {
                dev_dbg(dev, "Rx cls disabled in DPNI options\n");
                return -EOPNOTSUPP;
        }
@@ -2835,10 +2860,18 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
                return -EOPNOTSUPP;
        }
 
-       err = dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+       /* If there is no support for masking in the classification table,
+        * we don't set a default key, as it will depend on the rules
+        * added by the user at runtime.
+        */
+       if (!dpaa2_eth_fs_mask_enabled(priv))
+               goto out;
+
+       err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
        if (err)
                return err;
 
+out:
        priv->rx_cls_enabled = 1;
 
        return 0;
@@ -2876,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
        /* Configure the flow classification key; it includes all
         * supported header fields and cannot be modified at runtime
         */
-       err = dpaa2_eth_set_cls(priv);
+       err = dpaa2_eth_set_default_cls(priv);
        if (err && err != -EOPNOTSUPP)
                dev_err(dev, "Failed to configure Rx classification key\n");
 
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index ee9197ae28cd9be8c30768093ffa456082868f92..5fb8f5c0dc9f8572bc7145706d29cdfba74e001f 100644
@@ -395,6 +395,7 @@ struct dpaa2_eth_priv {
 
        /* enabled ethtool hashing bits */
        u64 rx_hash_fields;
+       u64 rx_cls_fields;
        struct dpaa2_eth_cls_rule *cls_rules;
        u8 rx_cls_enabled;
        struct bpf_prog *xdp_prog;
@@ -502,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
 int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
 
 #endif /* __DPAA2_H */
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 591dfcf76adbced88e4fd11e4ddcfcd532c9a46e..76bd8d2872cc8e2899e73a71d4f99822d60ed23a 100644
@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
 }
 
 static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
                *(__be16 *)(key + off) = eth_value->h_proto;
                *(__be16 *)(mask + off) = eth_mask->h_proto;
+               *fields |= DPAA2_ETH_DIST_ETHTYPE;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_source)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
                ether_addr_copy(key + off, eth_value->h_source);
                ether_addr_copy(mask + off, eth_mask->h_source);
+               *fields |= DPAA2_ETH_DIST_ETHSRC;
        }
 
        if (!is_zero_ether_addr(eth_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, eth_value->h_dest);
                ether_addr_copy(mask + off, eth_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
 
 static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                         struct ethtool_usrip4_spec *uip_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
        u32 tmp_value, tmp_mask;
@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = uip_value->ip4src;
                *(__be32 *)(mask + off) = uip_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (uip_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = uip_value->ip4dst;
                *(__be32 *)(mask + off) = uip_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (uip_mask->proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
                *(u8 *)(key + off) = uip_value->proto;
                *(u8 *)(mask + off) = uip_mask->proto;
+               *fields |= DPAA2_ETH_DIST_IPPROTO;
        }
 
        if (uip_mask->l4_4_bytes) {
@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = htons(tmp_value >> 16);
                *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+               *fields |= DPAA2_ETH_DIST_L4SRC;
 
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
                *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        return 0;
 }
 
 static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                        struct ethtool_tcpip4_spec *l4_mask,
-                       void *key, void *mask, u8 l4_proto)
+                       void *key, void *mask, u8 l4_proto, u64 *fields)
 {
        int off;
 
@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = l4_value->ip4src;
                *(__be32 *)(mask + off) = l4_mask->ip4src;
+               *fields |= DPAA2_ETH_DIST_IPSRC;
        }
 
        if (l4_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = l4_value->ip4dst;
                *(__be32 *)(mask + off) = l4_mask->ip4dst;
+               *fields |= DPAA2_ETH_DIST_IPDST;
        }
 
        if (l4_mask->psrc) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = l4_value->psrc;
                *(__be16 *)(mask + off) = l4_mask->psrc;
+               *fields |= DPAA2_ETH_DIST_L4SRC;
        }
 
        if (l4_mask->pdst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = l4_value->pdst;
                *(__be16 *)(mask + off) = l4_mask->pdst;
+               *fields |= DPAA2_ETH_DIST_L4DST;
        }
 
        /* Only apply the rule for IPv4 frames with the specified L4 proto */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
+       *fields |= DPAA2_ETH_DIST_ETHTYPE;
 
        off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
        *(u8 *)(key + off) = l4_proto;
        *(u8 *)(mask + off) = 0xFF;
+       *fields |= DPAA2_ETH_DIST_IPPROTO;
 
        return 0;
 }
 
 static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                         struct ethtool_flow_ext *ext_mask,
-                        void *key, void *mask)
+                        void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
                *(__be16 *)(key + off) = ext_value->vlan_tci;
                *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+               *fields |= DPAA2_ETH_DIST_VLAN;
        }
 
        return 0;
@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
 
 static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                             struct ethtool_flow_ext *ext_mask,
-                            void *key, void *mask)
+                            void *key, void *mask, u64 *fields)
 {
        int off;
 
@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, ext_value->h_dest);
                ether_addr_copy(mask + off, ext_mask->h_dest);
+               *fields |= DPAA2_ETH_DIST_ETHDST;
        }
 
        return 0;
 }
 
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+                        u64 *fields)
 {
        int err;
 
        switch (fs->flow_type & 0xFF) {
        case ETHER_FLOW:
                err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
-                                   key, mask);
+                                   key, mask, fields);
                break;
        case IP_USER_FLOW:
                err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
-                                   &fs->m_u.usr_ip4_spec, key, mask);
+                                   &fs->m_u.usr_ip4_spec, key, mask, fields);
                break;
        case TCP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
-                                  key, mask, IPPROTO_TCP);
+                                  key, mask, IPPROTO_TCP, fields);
                break;
        case UDP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
-                                  key, mask, IPPROTO_UDP);
+                                  key, mask, IPPROTO_UDP, fields);
                break;
        case SCTP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
                                   &fs->m_u.sctp_ip4_spec, key, mask,
-                                  IPPROTO_SCTP);
+                                  IPPROTO_SCTP, fields);
                break;
        default:
                return -EOPNOTSUPP;
@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
                return err;
 
        if (fs->flow_type & FLOW_EXT) {
-               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
                if (err)
                        return err;
        }
 
        if (fs->flow_type & FLOW_MAC_EXT) {
-               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+               err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+                                       fields);
                if (err)
                        return err;
        }
@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
        struct dpni_rule_cfg rule_cfg = { 0 };
        struct dpni_fs_action_cfg fs_act = { 0 };
        dma_addr_t key_iova;
+       u64 fields = 0;
        void *key_buf;
        int err;
 
@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
            fs->ring_cookie >= dpaa2_eth_queue_count(priv))
                return -EINVAL;
 
-       rule_cfg.key_size = dpaa2_eth_cls_key_size();
+       rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
 
        /* allocate twice the key size, for the actual key and for mask */
        key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
                return -ENOMEM;
 
        /* Fill the key and mask memory areas */
-       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+       err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
        if (err)
                goto free_mem;
 
+       if (!dpaa2_eth_fs_mask_enabled(priv)) {
+               /* Masking allows us to configure a maximal key during init and
+                * use it for all flow steering rules. Without it, we include
+                * in the key only the fields actually used, so we need to
+                * extract the others from the final key buffer.
+                *
+                * Program the FS key if needed, or return error if previously
+                * set key can't be used for the current rule. User needs to
+                * delete existing rules in this case to allow for the new one.
+                */
+               if (!priv->rx_cls_fields) {
+                       err = dpaa2_eth_set_cls(net_dev, fields);
+                       if (err)
+                               goto free_mem;
+
+                       priv->rx_cls_fields = fields;
+               } else if (priv->rx_cls_fields != fields) {
+                       netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+                       err = -EOPNOTSUPP;
+                       goto free_mem;
+               }
+
+               dpaa2_eth_cls_trim_rule(key_buf, fields);
+               rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+       }
+
        key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
@@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev,
        }
 
        rule_cfg.key_iova = key_iova;
-       rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+       if (dpaa2_eth_fs_mask_enabled(priv))
+               rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
 
        if (add) {
                if (fs->ring_cookie == RX_CLS_FLOW_DISC)
@@ -522,6 +569,17 @@ free_mem:
        return err;
 }
 
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+       int i, rules = 0;
+
+       for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+               if (priv->cls_rules[i].in_use)
+                       rules++;
+
+       return rules;
+}
+
 static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
                           int location)
@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
                        return err;
 
                rule->in_use = 0;
+
+               if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+                       priv->rx_cls_fields = 0;
        }
 
        /* If no new entry to add, return here */
@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                break;
        case ETHTOOL_GRXCLSRLCNT:
                rxnfc->rule_cnt = 0;
-               for (i = 0; i < max_rules; i++)
-                       if (priv->cls_rules[i].in_use)
-                               rxnfc->rule_cnt++;
+               rxnfc->rule_cnt = num_rules(priv);
                rxnfc->data = max_rules;
                break;
        case ETHTOOL_GRXCLSRULE: