#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
- struct list_head encap; /* flows sharing the same encap */
+ struct list_head encap; /* flows sharing the same encap ID */
+ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
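+/* key of the hash table that keeps modify header contexts for reuse:
+ * the packed FW-format action array and the number of actions in it
+ */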
+struct mod_hdr_key {
+ int num_actions;
+ void *actions;
+};
+
+struct mlx5e_mod_hdr_entry {
+ /* a node of a hash table which keeps all the mod_hdr entries */
+ struct hlist_node mod_hdr_hlist;
+
+ /* flows sharing the same mod_hdr entry */
+ struct list_head flows;
+
+ struct mod_hdr_key key;
+
+ u32 mod_hdr_id;
+};
+
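+/* size in bytes of a single modify header action in the FW command layout */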
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
+static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
+{
+ return jhash(key->actions,
+ key->num_actions * MLX5_MH_ACT_SZ, 0);
+}
+
+static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
+ struct mod_hdr_key *b)
+{
+ if (a->num_actions != b->num_actions)
+ return 1;
+
+ return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
+}
+
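+/* attach a flow to a modify header context: reuse an existing context
+ * whose action set is identical (within the same namespace), or allocate
+ * a new HW object and hash a new entry for it
+ */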
+static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int num_actions, actions_size, namespace, err;
+ struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_key key;
+ bool found = false;
+ u32 hash_key;
+
+ num_actions = parse_attr->num_mod_hdr_actions;
+ actions_size = MLX5_MH_ACT_SZ * num_actions;
+
+ key.actions = parse_attr->mod_hdr_actions;
+ key.num_actions = num_actions;
+
+ hash_key = hash_mod_hdr_info(&key);
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ namespace = MLX5_FLOW_NAMESPACE_FDB;
+ hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ namespace = MLX5_FLOW_NAMESPACE_KERNEL;
+ hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
+ mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, &key)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found)
+ goto attach_flow;
+
+ mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
+ if (!mh)
+ return -ENOMEM;
+
+ mh->key.actions = (void *)mh + sizeof(*mh);
+ memcpy(mh->key.actions, key.actions, actions_size);
+ mh->key.num_actions = num_actions;
+ INIT_LIST_HEAD(&mh->flows);
+
+ err = mlx5_modify_header_alloc(priv->mdev, namespace,
+ mh->key.num_actions,
+ mh->key.actions,
+ &mh->mod_hdr_id);
+ if (err)
+ goto out_err;
+
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ else
+ hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+
+attach_flow:
+ list_add(&flow->mod_hdr, &mh->flows);
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
+ else
+ flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
+
+ return 0;
+
+out_err:
+ kfree(mh);
+ return err;
+}
+
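+/* detach a flow from its modify header context; the context and its
+ * HW object are released once the last flow sharing them goes away
+ */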
+static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct list_head *next = flow->mod_hdr.next;
+
+ list_del(&flow->mod_hdr);
+
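+ /* if this was the last flow attached to the entry, @next now points
+ * back at the empty list head (mh->flows) and the entry can be freed
+ */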
+ if (list_empty(next)) {
+ struct mlx5e_mod_hdr_entry *mh;
+
+ mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
+
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+ hash_del(&mh->mod_hdr_hlist);
+ kfree(mh);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(flow->rule);
priv->fs.tc.t = NULL;
}
- if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- flow->nic_attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr->num_mod_hdr_actions,
- parse_attr->mod_hdr_actions,
- &attr->mod_hdr_id);
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err) {
rule = ERR_PTR(err);
return rule;
err_add_rule:
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
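+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;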
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
}
- mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);
+ mlx5_eswitch_del_vlan_action(esw, attr);
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
mlx5e_detach_encap(priv, flow);
- kvfree(flow->esw_attr->parse_attr);
+ kvfree(attr->parse_attr);
}
- if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5_modify_header_dealloc(priv->mdev,
- attr->mod_hdr_id);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ mlx5e_detach_mod_hdr(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_encap_dealloc(priv->mdev, e->encap_id);
- hlist_del_rcu(&e->encap_hlist);
+ hash_del_rcu(&e->encap_hlist);
kfree(e->encap_header);
kfree(e);
}
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_TCP) |
+ BIT(FLOW_DISSECTOR_KEY_IP))) {
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
*min_inline = MLX5_INLINE_MODE_TCP_UDP;
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->key);
+ struct flow_dissector_key_ip *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ f->mask);
+
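+ /* the TOS / traffic-class byte carries ECN in its two low bits and
+ * DSCP in its six upper bits, each matched by its own header field
+ */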
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
+
+ if (mask->tos)
+ *min_inline = MLX5_INLINE_MODE_IP;
+
+ if (mask->ttl) /* currently not supported */
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_dissector_key_tcp *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->key);
+ struct flow_dissector_key_tcp *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ f->mask);
+
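+ /* the flower TCP key carries the flags as a big-endian 16-bit value */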
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(key->flags));
+
+ if (mask->flags)
+ *min_inline = MLX5_INLINE_MODE_TCP_UDP;
+ }
+
return 0;
}
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last;
+ int i, action_size, nactions, max_actions, first, last, first_z;
void *s_masks_p, *a_masks_p, *vals_p;
- u32 s_mask, a_mask, val;
struct mlx5_fields *f;
u8 cmd, field_bsize;
+ u32 s_mask, a_mask;
unsigned long mask;
void *action;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i];
/* avoid seeing bits set from previous iterations */
- s_mask = a_mask = mask = val = 0;
+ s_mask = 0;
+ a_mask = 0;
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
memset(a_masks_p, 0, f->size);
}
- memcpy(&val, vals_p, f->size);
-
field_bsize = f->size * BITS_PER_BYTE;
+
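+ /* only a rewrite of the complete field is offloaded: reject masks
+ * with leading, trailing or embedded zero bits
+ */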
+ first_z = find_first_zero_bit(&mask, field_bsize);
first = find_first_bit(&mask, field_bsize);
last = find_last_bit(&mask, field_bsize);
- if (first > 0 || last != (field_bsize - 1)) {
+ if (first > 0 || last != (field_bsize - 1) || first_z < last) {
printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
mask);
return -EOPNOTSUPP;
}
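+ /* read the value at the width of its field so the conversion from
+ * network byte order is correct on any host endianness
+ */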
if (field_bsize == 32)
- MLX5_SET(set_action_in, action, data, ntohl(val));
+ MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
else if (field_bsize == 16)
- MLX5_SET(set_action_in, action, data, ntohs(val));
+ MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
else if (field_bsize == 8)
- MLX5_SET(set_action_in, action, data, val);
+ MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);
action += action_size;
nactions++;
return err;
}
+static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+{
+ u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+ TCA_CSUM_UPDATE_FLAG_UDP;
+
+ /* The HW recalcs checksums only if re-writing headers */
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ netdev_warn(priv->netdev,
+ "TC csum action is only offloaded with pedit\n");
+ return false;
+ }
+
+ if (update_flags & ~prot_flags) {
+ netdev_warn(priv->netdev,
+ "can't offload TC csum action for some header/s - flags %#x\n",
+ update_flags);
+ return false;
+ }
+
+ return true;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (attr->action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
continue;
}
+ if (is_tcf_csum(a)) {
+ if (csum_offload_supported(priv, attr->action,
+ tcf_csum_update_flags(a)))
+ continue;
+
+ return -EOPNOTSUPP;
+ }
+
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
continue;
}
+ if (is_tcf_csum(a)) {
+ if (csum_offload_supported(priv, attr->action,
+ tcf_csum_update_flags(a)))
+ continue;
+
+ return -EOPNOTSUPP;
+ }
+
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev, *encap_dev = NULL;
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5e_tc_flow *flow;
- struct tc_action *a;
struct mlx5_fc *counter;
- LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- preempt_disable();
-
- tcf_exts_to_list(f->exts, &actions);
- list_for_each_entry(a, &actions, list)
- tcf_action_stats_update(a, bytes, packets, lastuse);
-
- preempt_enable();
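+ /* the helper updates each action's stats with preemption disabled */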
+ tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
return 0;
}
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
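+ /* modify header contexts of NIC flows are kept here for reuse;
+ * eswitch flows use the table in esw->offloads
+ */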
+ hash_init(tc->mod_hdr_tbl);
+
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
}