2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
39 #include "fs_ft_pool.h"
40 #include "mlx5_core.h"
/*
 * Stub flow-command backend (mlx5_cmd_stub_*): no-op implementations wired
 * into the mlx5_flow_cmd_stubs ops table further down, for flow-table types
 * that are handled in software and need no firmware command.
 * NOTE(review): this listing is fragmentary — function bodies, returns and
 * closing braces are missing from the capture; restore from upstream before
 * building.
 */
43 static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
44 struct mlx5_flow_table *ft,
51 static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
52 struct mlx5_flow_table *ft,
54 struct mlx5_flow_table *next_ft)
/* Mimic FW sizing: round the table up to a power of two, minimum 1 entry. */
56 ft->max_fte = size ? roundup_pow_of_two(size) : 1;
61 static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
62 struct mlx5_flow_table *ft)
67 static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
68 struct mlx5_flow_table *ft,
69 struct mlx5_flow_table *next_ft)
74 static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
75 struct mlx5_flow_table *ft,
77 struct mlx5_flow_group *fg)
82 static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
83 struct mlx5_flow_table *ft,
84 struct mlx5_flow_group *fg)
89 static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
90 struct mlx5_flow_table *ft,
91 struct mlx5_flow_group *group,
97 static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
98 struct mlx5_flow_table *ft,
99 struct mlx5_flow_group *group,
106 static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
107 struct mlx5_flow_table *ft,
113 static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
114 struct mlx5_pkt_reformat_params *params,
115 enum mlx5_flow_namespace_type namespace,
116 struct mlx5_pkt_reformat *pkt_reformat)
121 static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
122 struct mlx5_pkt_reformat *pkt_reformat)
126 static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
127 u8 namespace, u8 num_actions,
128 void *modify_actions,
129 struct mlx5_modify_hdr *modify_hdr)
134 static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
135 struct mlx5_modify_hdr *modify_hdr)
139 static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
140 struct mlx5_flow_root_namespace *peer_ns)
145 static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
150 static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
/*
 * Point the slave device's FDB root at a table owned by the master
 * (shared-FDB LAG).  Issues SET_FLOW_TABLE_ROOT on the slave, marking the
 * master's vhca_id as the eswitch owner of the target table.
 * NOTE(review): fragmentary listing — the remaining parameters, the
 * conditional around the two table_id assignments, and closing braces are
 * missing from this capture.
 */
155 static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
156 struct mlx5_core_dev *slave,
160 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
161 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
162 struct mlx5_flow_root_namespace *root;
163 struct mlx5_flow_namespace *ns;
165 MLX5_SET(set_flow_table_root_in, in, opcode,
166 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
167 MLX5_SET(set_flow_table_root_in, in, table_type,
/* Target table belongs to the master eswitch: set owner vhca_id. */
170 MLX5_SET(set_flow_table_root_in, in,
171 table_eswitch_owner_vhca_id_valid, 1);
172 MLX5_SET(set_flow_table_root_in, in,
173 table_eswitch_owner_vhca_id,
174 MLX5_CAP_GEN(master, vhca_id));
175 MLX5_SET(set_flow_table_root_in, in, table_id,
/* Presumably the else-branch of a missing conditional: fall back to the
 * slave's own FDB namespace root table — confirm against upstream.
 */
178 ns = mlx5_get_flow_namespace(slave,
179 MLX5_FLOW_NAMESPACE_FDB);
180 root = find_root(&ns->node);
181 MLX5_SET(set_flow_table_root_in, in, table_id,
/* Command executes on the slave device. */
185 return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
/*
 * Connect or disconnect @ft as the root table of its namespace via
 * SET_FLOW_TABLE_ROOT.  On a shared-FDB LAG master the peer device's root
 * is updated as well, with a rollback if the peer update fails.
 * NOTE(review): fragmentary listing — the 'disconnect' parameter, the
 * guarding conditionals around op_mod/table_id, and closing braces are
 * missing from this capture.
 */
188 static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
189 struct mlx5_flow_table *ft, u32 underlay_qpn,
192 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
193 struct mlx5_core_dev *dev = ns->dev;
196 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
/* Shared-FDB LAG: only the master device programs the FDB root. */
200 if (ft->type == FS_FT_FDB &&
201 mlx5_lag_is_shared_fdb(dev) &&
202 !mlx5_lag_is_master(dev))
205 MLX5_SET(set_flow_table_root_in, in, opcode,
206 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
207 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
/* op_mod 1 selects the disconnect form (its guarding 'if (disconnect)'
 * is missing from this capture).
 */
210 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
212 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
214 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
215 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
216 MLX5_SET(set_flow_table_root_in, in, other_vport,
217 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
219 err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
/* Shared-FDB master: propagate the new root to the LAG peer device. */
221 ft->type == FS_FT_FDB &&
222 mlx5_lag_is_shared_fdb(dev) &&
223 mlx5_lag_is_master(dev)) {
224 err = mlx5_cmd_set_slave_root_fdb(dev,
225 mlx5_lag_get_peer_mdev(dev),
226 !disconnect, (!disconnect) ?
/* Peer update failed while connecting: restore the previous root
 * (op_mod 0 = connect) on this device.
 */
228 if (err && !disconnect) {
229 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
230 MLX5_SET(set_flow_table_root_in, in, table_id,
232 mlx5_cmd_exec_in(dev, set_flow_table_root, in);
/*
 * Create a flow table in firmware (CREATE_FLOW_TABLE).  The actual size is
 * taken from the flow-table pool (fs_ft_pool); encap/decap/termination
 * behavior comes from ft->flags, and op_mod distinguishes normal tables
 * from LAG demux tables.  On success ft->id is set from the command output;
 * on failure the reserved size is returned to the pool.
 * NOTE(review): fragmentary listing — the 'size' parameter, several branch
 * lines of the op_mod switch, and closing braces are missing.
 */
239 static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
240 struct mlx5_flow_table *ft,
242 struct mlx5_flow_table *next_ft)
244 int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
245 int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
246 int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
247 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
248 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
249 struct mlx5_core_dev *dev = ns->dev;
/* POOL_NEXT_SIZE asks the pool for its next bucket; otherwise round the
 * request up to a power of two before reserving from the pool.
 */
252 if (size != POOL_NEXT_SIZE)
253 size = roundup_pow_of_two(size);
254 size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
258 MLX5_SET(create_flow_table_in, in, opcode,
259 MLX5_CMD_OP_CREATE_FLOW_TABLE);
261 MLX5_SET(create_flow_table_in, in, table_type, ft->type);
262 MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
263 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
264 MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
265 MLX5_SET(create_flow_table_in, in, other_vport,
266 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
268 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
270 MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
272 MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
275 switch (ft->op_mod) {
276 case FS_FT_OP_MOD_NORMAL:
/* With a next table, miss traffic is forwarded to it; otherwise the
 * table's default miss action is used.
 */
278 MLX5_SET(create_flow_table_in, in,
279 flow_table_context.table_miss_action,
280 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
281 MLX5_SET(create_flow_table_in, in,
282 flow_table_context.table_miss_id, next_ft->id);
284 MLX5_SET(create_flow_table_in, in,
285 flow_table_context.table_miss_action,
286 ft->def_miss_action);
290 case FS_FT_OP_MOD_LAG_DEMUX:
291 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
293 MLX5_SET(create_flow_table_in, in,
294 flow_table_context.lag_master_next_table_id,
299 err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
301 ft->id = MLX5_GET(create_flow_table_out, out,
/* Error path: give the reserved size back to the pool. */
305 mlx5_ft_pool_put_sz(ns->dev, size);
/*
 * Destroy a firmware flow table (DESTROY_FLOW_TABLE) and, on success,
 * return its reserved size (ft->max_fte) to the flow-table pool.
 * NOTE(review): fragmentary listing — err declaration, braces and the
 * final return are missing from this capture.
 */
311 static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
312 struct mlx5_flow_table *ft)
314 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
315 struct mlx5_core_dev *dev = ns->dev;
318 MLX5_SET(destroy_flow_table_in, in, opcode,
319 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
320 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
321 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
322 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
323 MLX5_SET(destroy_flow_table_in, in, other_vport,
324 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
326 err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
328 mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
/*
 * Modify a flow table's chaining (MODIFY_FLOW_TABLE): for LAG demux tables
 * update lag_master_next_table_id; for normal tables update the miss-table
 * (forward misses to next_ft, or fall back to the table's default miss
 * action when there is none).
 * NOTE(review): fragmentary listing — the if/else structure around both
 * branches and the closing braces are partially missing.
 */
333 static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
334 struct mlx5_flow_table *ft,
335 struct mlx5_flow_table *next_ft)
337 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
338 struct mlx5_core_dev *dev = ns->dev;
340 MLX5_SET(modify_flow_table_in, in, opcode,
341 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
342 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
343 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
/* LAG demux: only the lag_master_next_table_id field is modified. */
345 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
346 MLX5_SET(modify_flow_table_in, in, modify_field_select,
347 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
349 MLX5_SET(modify_flow_table_in, in,
350 flow_table_context.lag_master_next_table_id, next_ft->id);
/* No next table: clear the LAG next-table link. */
352 MLX5_SET(modify_flow_table_in, in,
353 flow_table_context.lag_master_next_table_id, 0);
/* Normal table: modify the miss-table selection. */
356 MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
357 MLX5_SET(modify_flow_table_in, in, other_vport,
358 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
359 MLX5_SET(modify_flow_table_in, in, modify_field_select,
360 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
362 MLX5_SET(modify_flow_table_in, in,
363 flow_table_context.table_miss_action,
364 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
365 MLX5_SET(modify_flow_table_in, in,
366 flow_table_context.table_miss_id,
369 MLX5_SET(modify_flow_table_in, in,
370 flow_table_context.table_miss_action,
371 ft->def_miss_action);
375 return mlx5_cmd_exec_in(dev, modify_flow_table, in);
/*
 * Create a flow group in firmware (CREATE_FLOW_GROUP) and store the
 * returned group id in fg->id.
 * NOTE(review): fragmentary listing — the 'in' buffer declaration, the
 * conditional separating the two vport_number/other_vport assignments
 * (lines 392-393 vs 396-398 look like two branches of a missing if/else),
 * and the final return are not in this capture.
 */
378 static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
379 struct mlx5_flow_table *ft,
381 struct mlx5_flow_group *fg)
383 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
384 struct mlx5_core_dev *dev = ns->dev;
387 MLX5_SET(create_flow_group_in, in, opcode,
388 MLX5_CMD_OP_CREATE_FLOW_GROUP);
389 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
390 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
392 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
393 MLX5_SET(create_flow_group_in, in, other_vport, 1);
396 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
397 MLX5_SET(create_flow_group_in, in, other_vport,
398 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
399 err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
/* On success, record the FW-assigned group id. */
401 fg->id = MLX5_GET(create_flow_group_out, out,
/*
 * Destroy a firmware flow group (DESTROY_FLOW_GROUP) identified by table
 * and group id.  Returns the command status from mlx5_cmd_exec_in().
 */
406 static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
407 struct mlx5_flow_table *ft,
408 struct mlx5_flow_group *fg)
410 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
411 struct mlx5_core_dev *dev = ns->dev;
413 MLX5_SET(destroy_flow_group_in, in, opcode,
414 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
415 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
416 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
417 MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
418 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
419 MLX5_SET(destroy_flow_group_in, in, other_vport,
420 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
421 return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
/*
 * Decide whether an FTE needs the extended destination format.  Walks the
 * FTE's destination list, counting forward destinations (counters excluded)
 * and vport destinations that carry a packet-reformat id; extended format
 * is required when there are multiple forward destinations and at least one
 * encap.  Also validates the choice against FW capabilities
 * (log_max_fdb_encap_uplink).
 * NOTE(review): fragmentary listing — 'num_encap' declaration, loop
 * continues, and error returns are missing from this capture.
 */
424 static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
425 struct fs_fte *fte, bool *extended_dest)
427 int fw_log_max_fdb_encap_uplink =
428 MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
429 int num_fwd_destinations = 0;
430 struct mlx5_flow_rule *dst;
433 *extended_dest = false;
/* Nothing to check unless the FTE forwards to destinations. */
434 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
437 list_for_each_entry(dst, &fte->node.children, node.list) {
438 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
440 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
441 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
443 num_fwd_destinations++;
445 if (num_fwd_destinations > 1 && num_encap > 0)
446 *extended_dest = true;
/* Extended destination requested but FW advertises no support. */
448 if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
449 mlx5_core_warn(dev, "FW does not support extended destination");
/* Encap count must fit within the FW limit (2^log_max_fdb_encap_uplink). */
452 if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
453 mlx5_core_warn(dev, "FW does not support more than %d encaps",
454 1 << fw_log_max_fdb_encap_uplink);
/*
 * Build and execute SET_FLOW_TABLE_ENTRY for one FTE: allocates a
 * variable-length 'in' mailbox (header + one destination slot per dest),
 * fills in the flow context (tag, source, actions, reformat/modify-header
 * ids, push-VLAN headers, match value), then appends the destination list
 * and the flow-counter list.  @opmod/@modify_mask select create vs modify
 * semantics.
 * NOTE(review): fragmentary listing — several declarations (inlen, in,
 * in_dests, action, group_id, dst_cnt_size, list_size), branch headers,
 * the kvfree of 'in', and closing braces are missing from this capture.
 */
460 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
461 int opmod, int modify_mask,
462 struct mlx5_flow_table *ft,
466 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
467 bool extended_dest = false;
468 struct mlx5_flow_rule *dst;
469 void *in_flow_context, *vlan;
470 void *in_match_value;
/* Choose normal vs extended destination format (and validate FW caps). */
477 if (mlx5_set_extended_dest(dev, fte, &extended_dest))
481 dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
483 dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
/* Mailbox size: fixed header plus one destination slot per dest. */
485 inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
486 in = kvzalloc(inlen, GFP_KERNEL);
490 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
491 MLX5_SET(set_fte_in, in, op_mod, opmod);
492 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
493 MLX5_SET(set_fte_in, in, table_type, ft->type);
494 MLX5_SET(set_fte_in, in, table_id, ft->id);
495 MLX5_SET(set_fte_in, in, flow_index, fte->index);
496 MLX5_SET(set_fte_in, in, ignore_flow_level,
497 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
499 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
500 MLX5_SET(set_fte_in, in, other_vport,
501 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
503 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
504 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
506 MLX5_SET(flow_context, in_flow_context, flow_tag,
507 fte->flow_context.flow_tag);
508 MLX5_SET(flow_context, in_flow_context, flow_source,
509 fte->flow_context.flow_source);
511 MLX5_SET(flow_context, in_flow_context, extended_destination,
/* With extended destinations the packet-reformat action moves into the
 * per-destination slots, so strip it from the context-level action mask.
 */
516 action = fte->action.action &
517 ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
518 MLX5_SET(flow_context, in_flow_context, action, action);
520 MLX5_SET(flow_context, in_flow_context, action,
522 if (fte->action.pkt_reformat)
523 MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
524 fte->action.pkt_reformat->id);
526 if (fte->action.modify_hdr)
527 MLX5_SET(flow_context, in_flow_context, modify_header_id,
528 fte->action.modify_hdr->id);
530 MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
/* Push-VLAN headers: first then second VLAN tag. */
532 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
534 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
535 MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
536 MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
538 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
540 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
541 MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
542 MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
544 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
546 memcpy(in_match_value, &fte->val, sizeof(fte->val));
/* Forward-destination list (counters are handled separately below). */
548 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
549 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
552 list_for_each_entry(dst, &fte->node.children, node.list) {
553 unsigned int id, type = dst->dest_attr.type;
555 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
559 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
/* FLOW_TABLE_NUM carries the raw table number; FW sees it as
 * a plain FLOW_TABLE destination.
 */
560 id = dst->dest_attr.ft_num;
561 type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
563 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
564 id = dst->dest_attr.ft->id;
566 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
567 id = dst->dest_attr.vport.num;
568 MLX5_SET(dest_format_struct, in_dests,
569 destination_eswitch_owner_vhca_id_valid,
570 !!(dst->dest_attr.vport.flags &
571 MLX5_FLOW_DEST_VPORT_VHCA_ID));
572 MLX5_SET(dest_format_struct, in_dests,
573 destination_eswitch_owner_vhca_id,
574 dst->dest_attr.vport.vhca_id);
/* Extended format: per-destination packet reformat id. */
576 dst->dest_attr.vport.pkt_reformat) {
577 MLX5_SET(dest_format_struct, in_dests,
579 !!(dst->dest_attr.vport.flags &
580 MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
581 MLX5_SET(extended_dest_format, in_dests,
583 dst->dest_attr.vport.pkt_reformat->id);
586 case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
587 id = dst->dest_attr.sampler_id;
590 id = dst->dest_attr.tir_num;
593 MLX5_SET(dest_format_struct, in_dests, destination_type,
595 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
596 in_dests += dst_cnt_size;
600 MLX5_SET(flow_context, in_flow_context, destination_list_size,
/* Flow counters are appended after the forward destinations and bounded
 * by the FW's log_max_flow_counter capability.
 */
604 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
605 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
606 log_max_flow_counter,
610 list_for_each_entry(dst, &fte->node.children, node.list) {
611 if (dst->dest_attr.type !=
612 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
615 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
616 dst->dest_attr.counter_id);
617 in_dests += dst_cnt_size;
620 if (list_size > max_list_size) {
625 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
629 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/*
 * Create a flow-table entry: thin wrapper that issues mlx5_cmd_set_fte()
 * with opmod 0 and an empty modify mask for the given group.
 */
635 static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
636 struct mlx5_flow_table *ft,
637 struct mlx5_flow_group *group,
640 struct mlx5_core_dev *dev = ns->dev;
641 unsigned int group_id = group->id;
643 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
/*
 * Update an existing flow-table entry via mlx5_cmd_set_fte().
 * NOTE(review): fragmentary listing — the 'modify_mask' parameter, the
 * atomic-modify capability check that uses atomic_mod_cap, and the opmod
 * assignment are missing from this capture.
 */
646 static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
647 struct mlx5_flow_table *ft,
648 struct mlx5_flow_group *fg,
653 struct mlx5_core_dev *dev = ns->dev;
654 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
655 flow_table_properties_nic_receive.
661 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
/*
 * Delete one flow-table entry (DELETE_FLOW_TABLE_ENTRY) addressed by table
 * id and flow index.
 */
664 static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
665 struct mlx5_flow_table *ft,
668 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
669 struct mlx5_core_dev *dev = ns->dev;
671 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
672 MLX5_SET(delete_fte_in, in, table_type, ft->type);
673 MLX5_SET(delete_fte_in, in, table_id, ft->id);
674 MLX5_SET(delete_fte_in, in, flow_index, fte->index);
675 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
676 MLX5_SET(delete_fte_in, in, other_vport,
677 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
679 return mlx5_cmd_exec_in(dev, delete_fte, in);
/*
 * Allocate a bulk of flow counters (ALLOC_FLOW_COUNTER).  @alloc_bitmask
 * selects the bulk size; on success the base counter id is written to *id.
 * NOTE(review): fragmentary listing — err handling around the final
 * MLX5_GET and the return are missing from this capture.
 */
682 int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
683 enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
686 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
687 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
690 MLX5_SET(alloc_flow_counter_in, in, opcode,
691 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
692 MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
694 err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
696 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
/* Allocate a single flow counter: bulk-alloc with an empty bulk bitmask. */
700 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
702 return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
/* Free a flow counter by id (DEALLOC_FLOW_COUNTER). */
705 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
707 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
709 MLX5_SET(dealloc_flow_counter_in, in, opcode,
710 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
711 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
712 return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
/*
 * Query a single flow counter (QUERY_FLOW_COUNTER) and return its packet
 * and octet totals via *packets and *bytes.  The output buffer reserves
 * room for one traffic_counter record after the command header.
 * NOTE(review): fragmentary listing — 'stats'/'err' declarations, the
 * error-return after mlx5_cmd_exec, and the final return are missing.
 */
715 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
716 u64 *packets, u64 *bytes)
718 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
719 MLX5_ST_SZ_BYTES(traffic_counter)] = {};
720 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
724 MLX5_SET(query_flow_counter_in, in, opcode,
725 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
726 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
727 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
728 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
732 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
733 *packets = MLX5_GET64(traffic_counter, stats, packets);
734 *bytes = MLX5_GET64(traffic_counter, stats, octets);
/*
 * Output-buffer length for a bulk counter query: command header plus one
 * traffic_counter record per counter in the bulk.
 */
738 int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
740 return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
741 MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
/*
 * Query @bulk_len consecutive flow counters starting at @base_id
 * (QUERY_FLOW_COUNTER with num_of_counters); the caller supplies an output
 * buffer sized by mlx5_cmd_fc_get_bulk_query_out_len().
 */
744 int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
747 int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
748 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
750 MLX5_SET(query_flow_counter_in, in, opcode,
751 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
752 MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
753 MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
754 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
/*
 * Allocate a packet-reformat (encap) context in firmware
 * (ALLOC_PACKET_REFORMAT_CONTEXT).  Validates params->size against the
 * per-namespace max_encap_header_size cap, builds a variable-length
 * mailbox carrying the reformat header bytes, and on success stores the
 * FW-assigned id in pkt_reformat->id.
 * NOTE(review): fragmentary listing — declarations (in, inlen, reformat,
 * max_encap_size, err), the kfree of 'in', some error returns, and closing
 * braces are missing from this capture.
 */
757 static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
758 struct mlx5_pkt_reformat_params *params,
759 enum mlx5_flow_namespace_type namespace,
760 struct mlx5_pkt_reformat *pkt_reformat)
762 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
763 struct mlx5_core_dev *dev = ns->dev;
764 void *packet_reformat_context_in;
/* Encap size cap differs between the FDB (eswitch) and NIC namespaces. */
771 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
772 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
774 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
776 if (params->size > max_encap_size) {
777 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
778 params->size, max_encap_size);
/* Mailbox = fixed header + reformat data bytes. */
782 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
783 params->size, GFP_KERNEL);
787 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
788 in, packet_reformat_context);
789 reformat = MLX5_ADDR_OF(packet_reformat_context_in,
790 packet_reformat_context_in,
/* Effective input length ends right after the copied reformat data. */
792 inlen = reformat - (void *)in + params->size;
794 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
795 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
796 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
797 reformat_data_size, params->size);
798 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
799 reformat_type, params->type);
800 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
801 reformat_param_0, params->param_0);
802 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
803 reformat_param_1, params->param_1);
804 if (params->data && params->size)
805 memcpy(reformat, params->data, params->size);
807 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
809 pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
810 out, packet_reformat_id);
/*
 * Free a firmware packet-reformat context
 * (DEALLOC_PACKET_REFORMAT_CONTEXT); the command status is deliberately
 * ignored (void return).
 */
815 static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
816 struct mlx5_pkt_reformat *pkt_reformat)
818 u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
819 struct mlx5_core_dev *dev = ns->dev;
821 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
822 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
823 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
826 mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
/*
 * Allocate a modify-header context (ALLOC_MODIFY_HEADER_CONTEXT).  Maps the
 * flow namespace to a FW table_type and its max_modify_header_actions cap,
 * validates num_actions against that cap, then sends the action list in a
 * variable-length mailbox.  On success modify_hdr->id gets the FW id.
 * NOTE(review): fragmentary listing — 'table_type'/'in'/'actions_in'
 * declarations, the switch statement's opening/default/breaks, the kfree
 * of 'in', and closing braces are missing from this capture.
 */
829 static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
830 u8 namespace, u8 num_actions,
831 void *modify_actions,
832 struct mlx5_modify_hdr *modify_hdr)
834 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
835 int max_actions, actions_size, inlen, err;
836 struct mlx5_core_dev *dev = ns->dev;
/* Per-namespace mapping to FW table type and action-count capability. */
842 case MLX5_FLOW_NAMESPACE_FDB:
843 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
844 table_type = FS_FT_FDB;
846 case MLX5_FLOW_NAMESPACE_KERNEL:
847 case MLX5_FLOW_NAMESPACE_BYPASS:
848 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
849 table_type = FS_FT_NIC_RX;
851 case MLX5_FLOW_NAMESPACE_EGRESS:
/* EGRESS_KERNEL exists only when IPsec offload is configured in. */
852 #ifdef CONFIG_MLX5_IPSEC
853 case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
855 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
856 table_type = FS_FT_NIC_TX;
858 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
859 max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
860 table_type = FS_FT_ESW_INGRESS_ACL;
862 case MLX5_FLOW_NAMESPACE_RDMA_TX:
863 max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
864 table_type = FS_FT_RDMA_TX;
870 if (num_actions > max_actions) {
871 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
872 num_actions, max_actions);
/* Mailbox = fixed header + one action union per requested action. */
876 actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
877 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
879 in = kzalloc(inlen, GFP_KERNEL);
883 MLX5_SET(alloc_modify_header_context_in, in, opcode,
884 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
885 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
886 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
888 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
889 memcpy(actions_in, modify_actions, actions_size);
891 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
893 modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
/*
 * Free a firmware modify-header context (DEALLOC_MODIFY_HEADER_CONTEXT);
 * the command status is deliberately ignored (void return).
 */
898 static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
899 struct mlx5_modify_hdr *modify_hdr)
901 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
902 struct mlx5_core_dev *dev = ns->dev;
904 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
905 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
906 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
909 mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
/*
 * Firmware-backed flow-command ops table: real FW commands for table and
 * FTE manipulation, with stub implementations only for the peer/namespace
 * operations that have no FW command here.
 */
912 static const struct mlx5_flow_cmds mlx5_flow_cmds = {
913 .create_flow_table = mlx5_cmd_create_flow_table,
914 .destroy_flow_table = mlx5_cmd_destroy_flow_table,
915 .modify_flow_table = mlx5_cmd_modify_flow_table,
916 .create_flow_group = mlx5_cmd_create_flow_group,
917 .destroy_flow_group = mlx5_cmd_destroy_flow_group,
918 .create_fte = mlx5_cmd_create_fte,
919 .update_fte = mlx5_cmd_update_fte,
920 .delete_fte = mlx5_cmd_delete_fte,
921 .update_root_ft = mlx5_cmd_update_root_ft,
922 .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
923 .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
924 .modify_header_alloc = mlx5_cmd_modify_header_alloc,
925 .modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
926 .set_peer = mlx5_cmd_stub_set_peer,
927 .create_ns = mlx5_cmd_stub_create_ns,
928 .destroy_ns = mlx5_cmd_stub_destroy_ns,
/*
 * All-stub ops table: every operation is a no-op, used for table types
 * that are not backed by firmware commands.
 */
931 static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
932 .create_flow_table = mlx5_cmd_stub_create_flow_table,
933 .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
934 .modify_flow_table = mlx5_cmd_stub_modify_flow_table,
935 .create_flow_group = mlx5_cmd_stub_create_flow_group,
936 .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
937 .create_fte = mlx5_cmd_stub_create_fte,
938 .update_fte = mlx5_cmd_stub_update_fte,
939 .delete_fte = mlx5_cmd_stub_delete_fte,
940 .update_root_ft = mlx5_cmd_stub_update_root_ft,
941 .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
942 .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
943 .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
944 .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
945 .set_peer = mlx5_cmd_stub_set_peer,
946 .create_ns = mlx5_cmd_stub_create_ns,
947 .destroy_ns = mlx5_cmd_stub_destroy_ns,
/* Accessor for the firmware-backed ops table. */
950 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
952 return &mlx5_flow_cmds;
/* Accessor for the all-stub ops table. */
955 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
957 return &mlx5_flow_cmd_stubs;
/*
 * Select the default ops table for a flow-table type: hardware-managed
 * types (ACLs, sniffers, and the other cases visible here) get the FW
 * command set; everything else gets the stubs.
 * NOTE(review): fragmentary listing — the switch header, several case
 * labels, and the closing braces are missing from this capture.
 */
960 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
964 case FS_FT_ESW_EGRESS_ACL:
965 case FS_FT_ESW_INGRESS_ACL:
967 case FS_FT_SNIFFER_RX:
968 case FS_FT_SNIFFER_TX:
972 return mlx5_fs_cmd_get_fw_cmds();
974 return mlx5_fs_cmd_get_stub_cmds();