1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies */
4 #include <linux/mlx5/vport.h>
/* Return true when @ft is a termination table, i.e. one that must be
 * managed through the FW command path instead of SW steering.
 * NOTE(review): the return statements of this helper are not visible in
 * this sampled listing — body is incomplete here.
 */
12 static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
14 if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
/* Root flow tables are always FW-owned: delegate update_root_ft straight
 * to the FW command implementation.
 */
20 static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
21 struct mlx5_flow_table *ft,
25 return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
/* Point @ft's miss path at @next_ft (NULL next_ft -> default miss).
 * Creates a new dest-table action, installs it on the dr_table, then
 * destroys the previously installed miss action, if any.
 * NOTE(review): several error-check lines are missing from this sampled
 * listing; the visible destroy-on-failure path suggests the new action is
 * torn down when mlx5dr_table_set_miss_action() fails — confirm in full source.
 */
29 static int set_miss_action(struct mlx5_flow_root_namespace *ns,
30 struct mlx5_flow_table *ft,
31 struct mlx5_flow_table *next_ft)
33 struct mlx5dr_action *old_miss_action;
34 struct mlx5dr_action *action = NULL;
35 struct mlx5dr_table *next_tbl;
/* No next table means the rule misses to the domain default. */
38 next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
40 action = mlx5dr_action_create_dest_table(next_tbl);
/* Remember the current miss action so it can be released after swap. */
44 old_miss_action = ft->fs_dr_table.miss_action;
45 err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
47 err = mlx5dr_action_destroy(action);
49 mlx5_core_err(ns->dev,
50 "Failed to destroy action (%d)\n", err);
/* Swap succeeded: record the new action and drop the old one. */
53 ft->fs_dr_table.miss_action = action;
54 if (old_miss_action) {
55 err = mlx5dr_action_destroy(old_miss_action);
57 mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
/* Create a flow table via SW steering. Termination tables fall back to
 * the FW command path. Encap/decap flags are stripped when the device
 * does not support reformat for SW-steering-owned tables. On miss-action
 * setup failure the freshly created dr_table is destroyed (rollback).
 */
64 static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
65 struct mlx5_flow_table *ft,
66 struct mlx5_flow_table_attr *ft_attr,
67 struct mlx5_flow_table *next_ft)
69 struct mlx5dr_table *tbl;
73 if (dr_is_fw_term_table(ft))
74 return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
78 /* turn off encap/decap if not supported for sw-str by fw */
79 if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
80 flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
81 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
83 tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
86 mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
90 ft->fs_dr_table.dr_table = tbl;
91 ft->id = mlx5dr_table_get_id(tbl);
/* Wire up the miss path toward next_ft; undo table creation on failure. */
94 err = set_miss_action(ns, ft, next_ft);
96 mlx5dr_table_destroy(tbl);
97 ft->fs_dr_table.dr_table = NULL;
/* SW-steering tables have no FTE capacity limit from FS core's view. */
102 ft->max_fte = INT_MAX;
/* Destroy a SW-steering flow table: tear down the dr_table first, then
 * the miss action that was attached to it. FW termination tables are
 * delegated to the FW command path.
 */
107 static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
108 struct mlx5_flow_table *ft)
110 struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
113 if (dr_is_fw_term_table(ft))
114 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
116 err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
118 mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
/* Release the miss action only after the table no longer references it. */
123 err = mlx5dr_action_destroy(action);
125 mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
/* "Modify" for SW steering only means re-targeting the miss path to a
 * new next_ft; termination tables go through the FW command path.
 */
134 static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
135 struct mlx5_flow_table *ft,
136 struct mlx5_flow_table *next_ft)
138 if (dr_is_fw_term_table(ft))
139 return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
141 return set_miss_action(ns, ft, next_ft);
/* Create a flow group as a dr matcher: priority and match criteria are
 * extracted from the FW-format create_flow_group_in blob, the match mask
 * is taken from the group, and the resulting matcher handle is stored in
 * fg->fs_dr_matcher.
 */
144 static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
145 struct mlx5_flow_table *ft,
147 struct mlx5_flow_group *fg)
149 struct mlx5dr_matcher *matcher;
150 u32 priority = MLX5_GET(create_flow_group_in, in,
152 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
154 match_criteria_enable);
155 struct mlx5dr_match_parameters mask;
157 if (dr_is_fw_term_table(ft))
158 return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
/* The mask buffer points into the FW command blob; size comes from fg. */
161 mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
163 mask.match_sz = sizeof(fg->mask.match_criteria);
165 matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
167 match_criteria_enable,
170 mlx5_core_err(ns->dev, "Failed creating matcher\n");
174 fg->fs_dr_matcher.dr_matcher = matcher;
/* Destroy the dr matcher backing a flow group; FW termination tables are
 * delegated to the FW command path.
 */
178 static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
179 struct mlx5_flow_table *ft,
180 struct mlx5_flow_group *fg)
182 if (dr_is_fw_term_table(ft))
183 return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
185 return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
/* Build a dest-vport action from the rule's destination attributes; the
 * vhca_id is only used when MLX5_FLOW_DEST_VPORT_VHCA_ID is set.
 */
188 static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
189 struct mlx5_flow_rule *dst)
191 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
193 return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
194 dest_attr->vport.flags &
195 MLX5_FLOW_DEST_VPORT_VHCA_ID,
196 dest_attr->vport.vhca_id);
/* Like create_vport_action() but targets the uplink vport explicitly and
 * always passes the vhca_id as valid (third argument hard-coded to 1).
 */
199 static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
200 struct mlx5_flow_rule *dst)
202 struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
204 return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
205 dest_attr->vport.vhca_id);
/* Build a forward-to-table action. FW-owned destination tables use the
 * dedicated fw-table action; SW tables use a dest-table action, with the
 * is_wire_ft bit mirroring the destination's UPLINK_VPORT flag.
 * NOTE(review): the error check between action creation and the
 * dest_tbl dereference is not visible in this sampled listing.
 */
208 static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
209 struct mlx5_flow_rule *dst)
211 struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
212 struct mlx5dr_action *tbl_action;
214 if (mlx5dr_is_fw_table(dest_ft))
215 return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
217 tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
219 tbl_action->dest_tbl->is_wire_ft =
220 dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
/* Thin wrapper: translate the FS-core range destination (field, hit/miss
 * tables, min/max) into a dr match-range action.
 */
225 static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
226 struct mlx5_flow_rule *dst)
228 return mlx5dr_action_create_dest_match_range(domain,
229 dst->dest_attr.range.field,
230 dst->dest_attr.range.hit_ft,
231 dst->dest_attr.range.miss_ft,
232 dst->dest_attr.range.min,
233 dst->dest_attr.range.max);
/* Pack ethtype/prio/vid into a single VLAN header dword
 * (ethtype[31:16] | prio[15:12] | vid[11:0]) and create a push-vlan
 * action; htonl() converts to the big-endian wire layout.
 * NOTE(review): the vid declaration line is not visible in this listing.
 */
236 static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
237 struct mlx5_fs_vlan *vlan)
239 u16 n_ethtype = vlan->ethtype;
240 u8 prio = vlan->prio;
244 vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
245 return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
/* True when the destination is a (v)port carrying its own reformat ID —
 * in that case the per-destination reformat replaces the rule-level one.
 */
248 static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
250 return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
251 dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
252 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
255 /* We want to support a rule with 32 destinations, which means we need to
256 * account for 32 destinations plus usually a counter plus one more action
257 * for a multi-destination flow table.
259 #define MLX5_FLOW_CONTEXT_ACTION_MAX 34
/* Translate one FS-core FTE into a dr rule.
 *
 * Three action arrays are filled while walking the FTE:
 *  - actions[]:       the ordered action list handed to mlx5dr_rule_create()
 *  - term_actions[]:  terminating destinations (drop / table / vport / ...),
 *                     folded into a multi-dest table when there is more than one
 *  - fs_dr_actions[]: every action created here, kept so delete_fte can
 *                     destroy them later (stored on fte->fs_dr_rule)
 *
 * Ordering matters for SW steering (see comment at original line 319):
 * TX: modify header -> push vlan -> encap; RX: decap -> pop vlan -> modify hdr.
 *
 * NOTE(review): this sampled listing is missing many lines (error checks,
 * switch braces, goto labels, frees of actions/term_actions on the success
 * path) — comments below only describe what is visible.
 */
260 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
261 struct mlx5_flow_table *ft,
262 struct mlx5_flow_group *group,
265 struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
266 struct mlx5dr_action_dest *term_actions;
267 struct mlx5dr_match_parameters params;
268 struct mlx5_core_dev *dev = ns->dev;
269 struct mlx5dr_action **fs_dr_actions;
270 struct mlx5dr_action *tmp_action;
271 struct mlx5dr_action **actions;
272 bool delay_encap_set = false;
273 struct mlx5dr_rule *rule;
274 struct mlx5_flow_rule *dst;
275 int fs_dr_num_actions = 0;
276 int num_term_actions = 0;
282 if (dr_is_fw_term_table(ft))
283 return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
/* Three worst-case-sized arrays; freed via the goto-cleanup labels below. */
285 actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
292 fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
293 sizeof(*fs_dr_actions), GFP_KERNEL);
294 if (!fs_dr_actions) {
296 goto free_actions_alloc;
299 term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
300 sizeof(*term_actions), GFP_KERNEL);
303 goto free_fs_dr_actions_alloc;
306 match_sz = sizeof(fte->val);
308 /* Drop reformat action bit if destination vport set with reformat */
309 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
310 list_for_each_entry(dst, &fte->node.children, node.list) {
311 if (!contain_vport_reformat_action(dst))
314 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
319 /* The order of the actions are must to be keep, only the following
320 * order is supported by SW steering:
321 * TX: modify header -> push vlan -> encap
322 * RX: decap -> pop vlan -> modify header
324 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
325 enum mlx5dr_action_reformat_type decap_type =
326 DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
328 tmp_action = mlx5dr_action_create_packet_reformat(domain,
336 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
337 actions[num_actions++] = tmp_action;
/* Rule-level reformat: reject FW-owned reformat objects in SW rules;
 * decap reformats run early, encap ones are delayed to after vlan push
 * (delay_encap_set) to respect the ordering constraint above.
 */
340 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
343 if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
345 mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
349 is_decap = fte->action.pkt_reformat->reformat_type ==
350 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
353 actions[num_actions++] =
354 fte->action.pkt_reformat->action.dr_action;
356 delay_encap_set = true;
359 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
361 mlx5dr_action_create_pop_vlan();
366 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
367 actions[num_actions++] = tmp_action;
370 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
372 mlx5dr_action_create_pop_vlan();
377 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
378 actions[num_actions++] = tmp_action;
/* Modify-header action is owned by the caller's modify_hdr object, so it
 * goes into actions[] but not fs_dr_actions[].
 */
381 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
382 actions[num_actions++] =
383 fte->action.modify_hdr->action.dr_action;
385 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
386 tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
391 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
392 actions[num_actions++] = tmp_action;
395 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
396 tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
401 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
402 actions[num_actions++] = tmp_action;
/* Delayed encap (see delay_encap_set above) is appended after vlan push. */
406 actions[num_actions++] =
407 fte->action.pkt_reformat->action.dr_action;
409 /* The order of the actions below is not important */
411 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
412 tmp_action = mlx5dr_action_create_drop();
417 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
418 term_actions[num_term_actions++].dest = tmp_action;
421 if (fte->flow_context.flow_tag) {
423 mlx5dr_action_create_tag(fte->flow_context.flow_tag);
428 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
429 actions[num_actions++] = tmp_action;
/* Forward destinations: each becomes a terminating action; counters are
 * skipped here and handled in the COUNT pass below.
 */
432 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
433 list_for_each_entry(dst, &fte->node.children, node.list) {
434 enum mlx5_flow_destination_type type = dst->dest_attr.type;
/* Guard against overflowing the fixed-size action arrays. */
437 if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
438 num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
443 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
447 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
448 tmp_action = create_ft_action(domain, dst);
453 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
454 term_actions[num_term_actions++].dest = tmp_action;
456 case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
457 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
458 tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
459 create_vport_action(domain, dst) :
460 create_uplink_action(domain, dst);
465 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
466 term_actions[num_term_actions].dest = tmp_action;
/* Per-vport reformat is attached to the same term entry as its dest. */
468 if (dst->dest_attr.vport.flags &
469 MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
470 term_actions[num_term_actions].reformat =
471 dst->dest_attr.vport.pkt_reformat->action.dr_action;
475 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
476 id = dst->dest_attr.ft_num;
477 tmp_action = mlx5dr_action_create_dest_table_num(domain,
483 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
484 term_actions[num_term_actions++].dest = tmp_action;
486 case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
487 id = dst->dest_attr.sampler_id;
488 tmp_action = mlx5dr_action_create_flow_sampler(domain,
494 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
495 term_actions[num_term_actions++].dest = tmp_action;
497 case MLX5_FLOW_DESTINATION_TYPE_RANGE:
498 tmp_action = create_range_action(domain, dst);
503 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
504 term_actions[num_term_actions++].dest = tmp_action;
/* Second pass over destinations: pick up only counters. */
513 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
514 list_for_each_entry(dst, &fte->node.children, node.list) {
517 if (dst->dest_attr.type !=
518 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
521 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
522 fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
527 id = dst->dest_attr.counter_id;
529 mlx5dr_action_create_flow_counter(id);
535 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
536 actions[num_actions++] = tmp_action;
/* ASO execution: only the flow-meter ASO type is supported here. */
540 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
541 if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
547 mlx5dr_action_create_aso(domain,
548 fte->action.exe_aso.object_id,
549 fte->action.exe_aso.return_reg_id,
550 fte->action.exe_aso.type,
551 fte->action.exe_aso.flow_meter.init_color,
552 fte->action.exe_aso.flow_meter.meter_idx);
557 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
558 actions[num_actions++] = tmp_action;
/* Terminating actions: a single one is appended directly (reformat first
 * if present); multiple ones are folded into one multi-dest table action.
 */
561 params.match_sz = match_sz;
562 params.match_buf = (u64 *)fte->val;
563 if (num_term_actions == 1) {
564 if (term_actions->reformat) {
565 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
569 actions[num_actions++] = term_actions->reformat;
572 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
576 actions[num_actions++] = term_actions->dest;
577 } else if (num_term_actions > 1) {
578 bool ignore_flow_level =
579 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
580 u32 flow_source = fte->flow_context.flow_source;
582 if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
583 fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
587 tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
596 fs_dr_actions[fs_dr_num_actions++] = tmp_action;
597 actions[num_actions++] = tmp_action;
600 rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
604 fte->flow_context.flow_source);
/* Success: hand ownership of fs_dr_actions over to the FTE so that
 * delete_fte can destroy them.
 */
613 fte->fs_dr_rule.dr_rule = rule;
614 fte->fs_dr_rule.num_actions = fs_dr_num_actions;
615 fte->fs_dr_rule.dr_actions = fs_dr_actions;
620 /* Free in reverse order to handle action dependencies */
621 for (i = fs_dr_num_actions - 1; i >= 0; i--)
622 if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
623 mlx5dr_action_destroy(fs_dr_actions[i]);
626 free_fs_dr_actions_alloc:
627 kfree(fs_dr_actions);
631 mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
/* Allocate a packet-reformat action in the SW-steering domain: map the
 * FW reformat type to its DR equivalent, create the action, and mark the
 * resulting pkt_reformat as SW-owned. Unsupported types are rejected.
 */
635 static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
636 struct mlx5_pkt_reformat_params *params,
637 enum mlx5_flow_namespace_type namespace,
638 struct mlx5_pkt_reformat *pkt_reformat)
640 struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
641 struct mlx5dr_action *action;
644 switch (params->type) {
645 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
646 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
647 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
648 dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
650 case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
651 dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
653 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
654 dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
656 case MLX5_REFORMAT_TYPE_INSERT_HDR:
657 dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
659 case MLX5_REFORMAT_TYPE_REMOVE_HDR:
660 dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
663 mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
668 action = mlx5dr_action_create_packet_reformat(dr_domain,
675 mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
/* SW ownership lets create_fte distinguish this from FW-owned reformats. */
679 pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
680 pkt_reformat->action.dr_action = action;
/* Release the dr action created by mlx5_cmd_dr_packet_reformat_alloc(). */
685 static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
686 struct mlx5_pkt_reformat *pkt_reformat)
688 mlx5dr_action_destroy(pkt_reformat->action.dr_action);
/* Allocate a modify-header action: the FW-format action array size is
 * computed from the per-action union size times num_actions, and the
 * resulting modify_hdr is marked SW-owned.
 */
691 static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
692 u8 namespace, u8 num_actions,
693 void *modify_actions,
694 struct mlx5_modify_hdr *modify_hdr)
696 struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
697 struct mlx5dr_action *action;
700 actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
702 action = mlx5dr_action_create_modify_header(dr_domain, 0,
706 mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
710 modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
711 modify_hdr->action.dr_action = action;
/* Release the dr action created by mlx5_cmd_dr_modify_header_alloc(). */
716 static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
717 struct mlx5_modify_hdr *modify_hdr)
719 mlx5dr_action_destroy(modify_hdr->action.dr_action);
/* Match definers are not implemented for SW steering; bodies (not visible
 * in this sampled listing) are presumably stubs — confirm in full source.
 */
723 mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
729 static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
730 u16 format_id, u32 *match_mask)
/* Delete an FTE: destroy the dr rule first, then every action that
 * create_fte stored on the FTE, in reverse creation order (later actions
 * may reference earlier ones), and free the saved action array.
 */
735 static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
736 struct mlx5_flow_table *ft,
739 struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
743 if (dr_is_fw_term_table(ft))
744 return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
746 err = mlx5dr_rule_destroy(rule->dr_rule);
750 /* Free in reverse order to handle action dependencies */
751 for (i = rule->num_actions - 1; i >= 0; i--)
752 if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
753 mlx5dr_action_destroy(rule->dr_actions[i]);
755 kfree(rule->dr_actions);
/* Update an FTE using make-before-break: back up the existing dr rule,
 * create the new rule for the updated FTE, then delete the old rule.
 * On create failure the backup is restored (visible at original line 785).
 */
759 static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
760 struct mlx5_flow_table *ft,
761 struct mlx5_flow_group *group,
765 struct fs_fte fte_tmp = {};
768 if (dr_is_fw_term_table(ft))
769 return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
771 /* Backup current dr rule details */
772 fte_tmp.fs_dr_rule = fte->fs_dr_rule;
773 memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));
775 /* First add the new updated rule, then delete the old rule */
776 ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
780 ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
781 WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
/* Error path: restore the original rule details on the FTE. */
785 fte->fs_dr_rule = fte_tmp.fs_dr_rule;
/* Pair this namespace's dr domain with a peer domain (e.g. for LAG /
 * merged eswitch); a NULL peer_ns (guard not visible here) would clear it.
 */
789 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
790 struct mlx5_flow_root_namespace *peer_ns,
793 struct mlx5dr_domain *peer_domain = NULL;
796 peer_domain = peer_ns->fs_dr_domain.dr_domain;
797 mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
798 peer_domain, peer_vhca_id);
/* Create the SW-steering FDB domain backing this root namespace. */
802 static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
804 ns->fs_dr_domain.dr_domain =
805 mlx5dr_domain_create(ns->dev,
806 MLX5DR_DOMAIN_TYPE_FDB);
807 if (!ns->fs_dr_domain.dr_domain) {
808 mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
/* Tear down the namespace's SW-steering domain. */
814 static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
816 return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
/* Report SW-steering-specific capability bits: RX vlan-push / TX vlan-pop
 * only for FDB tables on devices newer than ConnectX-5 steering format,
 * plus match-range support when the device advertises it.
 */
819 static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
820 enum fs_flow_table_type ft_type)
822 u32 steering_caps = 0;
824 if (ft_type != FS_FT_FDB ||
825 MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
828 steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
829 steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
831 if (mlx5dr_supp_match_ranges(ns->dev))
832 steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
834 return steering_caps;
/* Expose the HW id of a SW-owned packet-reformat action for the encap
 * reformat types; the default/unsupported path is not visible in this
 * sampled listing.
 */
837 int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
839 switch (pkt_reformat->reformat_type) {
840 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
841 case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
842 case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
843 case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
844 case MLX5_REFORMAT_TYPE_INSERT_HDR:
845 return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->action.dr_action);
/* Thin wrapper: SW steering availability is decided by the dr layer. */
850 bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
852 return mlx5dr_is_supported(dev);
/* mlx5_flow_cmds vtable that routes FS-core operations to the SW-steering
 * implementations above (each of which falls back to the FW command path
 * for termination tables).
 */
855 static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
856 .create_flow_table = mlx5_cmd_dr_create_flow_table,
857 .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
858 .modify_flow_table = mlx5_cmd_dr_modify_flow_table,
859 .create_flow_group = mlx5_cmd_dr_create_flow_group,
860 .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
861 .create_fte = mlx5_cmd_dr_create_fte,
862 .update_fte = mlx5_cmd_dr_update_fte,
863 .delete_fte = mlx5_cmd_dr_delete_fte,
864 .update_root_ft = mlx5_cmd_dr_update_root_ft,
865 .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
866 .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
867 .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
868 .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
869 .create_match_definer = mlx5_cmd_dr_create_match_definer,
870 .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
871 .set_peer = mlx5_cmd_dr_set_peer,
872 .create_ns = mlx5_cmd_dr_create_ns,
873 .destroy_ns = mlx5_cmd_dr_destroy_ns,
874 .get_capabilities = mlx5_cmd_dr_get_capabilities,
/* Accessor for the SW-steering command vtable above. */
877 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
879 return &mlx5_flow_cmds_dr;