/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"
42 static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft,
50 static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
52 enum fs_flow_table_op_mod op_mod,
53 enum fs_flow_table_type type,
55 unsigned int log_size,
56 struct mlx5_flow_table *next_ft,
57 unsigned int *table_id, u32 flags)
/* No-op flow table destruction counterpart of the create stub. */
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft)
{
	return 0;
}
/* No-op flow table modification for software-only table types. */
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}
75 static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
76 struct mlx5_flow_table *ft,
78 unsigned int *group_id)
/* No-op flow group destruction counterpart of the create stub. */
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    unsigned int group_id)
{
	return 0;
}
/* No-op flow table entry creation for software-only tables. */
static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}
98 static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
99 struct mlx5_flow_table *ft,
100 unsigned int group_id,
/* No-op flow table entry deletion for software-only tables. */
static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}
114 static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
115 struct mlx5_flow_table *ft, u32 underlay_qpn,
118 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
119 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
121 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
125 MLX5_SET(set_flow_table_root_in, in, opcode,
126 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
127 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
130 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
131 MLX5_SET(set_flow_table_root_in, in, table_id, 0);
133 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
134 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
137 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
139 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
140 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
143 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
146 static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
148 enum fs_flow_table_op_mod op_mod,
149 enum fs_flow_table_type type,
151 unsigned int log_size,
152 struct mlx5_flow_table *next_ft,
153 unsigned int *table_id, u32 flags)
155 int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
156 int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
157 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
158 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
161 MLX5_SET(create_flow_table_in, in, opcode,
162 MLX5_CMD_OP_CREATE_FLOW_TABLE);
164 MLX5_SET(create_flow_table_in, in, table_type, type);
165 MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
166 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
168 MLX5_SET(create_flow_table_in, in, vport_number, vport);
169 MLX5_SET(create_flow_table_in, in, other_vport, 1);
172 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
174 MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
178 case FS_FT_OP_MOD_NORMAL:
180 MLX5_SET(create_flow_table_in, in,
181 flow_table_context.table_miss_action, 1);
182 MLX5_SET(create_flow_table_in, in,
183 flow_table_context.table_miss_id, next_ft->id);
187 case FS_FT_OP_MOD_LAG_DEMUX:
188 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
190 MLX5_SET(create_flow_table_in, in,
191 flow_table_context.lag_master_next_table_id,
196 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
198 *table_id = MLX5_GET(create_flow_table_out, out,
203 static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
204 struct mlx5_flow_table *ft)
206 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
207 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
209 MLX5_SET(destroy_flow_table_in, in, opcode,
210 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
211 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
212 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
214 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
215 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
218 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
221 static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
222 struct mlx5_flow_table *ft,
223 struct mlx5_flow_table *next_ft)
225 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
226 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
228 MLX5_SET(modify_flow_table_in, in, opcode,
229 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
230 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
231 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
233 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
234 MLX5_SET(modify_flow_table_in, in, modify_field_select,
235 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
237 MLX5_SET(modify_flow_table_in, in,
238 flow_table_context.lag_master_next_table_id, next_ft->id);
240 MLX5_SET(modify_flow_table_in, in,
241 flow_table_context.lag_master_next_table_id, 0);
245 MLX5_SET(modify_flow_table_in, in, vport_number,
247 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
249 MLX5_SET(modify_flow_table_in, in, modify_field_select,
250 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
252 MLX5_SET(modify_flow_table_in, in,
253 flow_table_context.table_miss_action, 1);
254 MLX5_SET(modify_flow_table_in, in,
255 flow_table_context.table_miss_id,
258 MLX5_SET(modify_flow_table_in, in,
259 flow_table_context.table_miss_action, 0);
263 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
266 static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
267 struct mlx5_flow_table *ft,
269 unsigned int *group_id)
271 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
272 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
275 MLX5_SET(create_flow_group_in, in, opcode,
276 MLX5_CMD_OP_CREATE_FLOW_GROUP);
277 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
278 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
280 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
281 MLX5_SET(create_flow_group_in, in, other_vport, 1);
284 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
286 *group_id = MLX5_GET(create_flow_group_out, out,
291 static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
292 struct mlx5_flow_table *ft,
293 unsigned int group_id)
295 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
296 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
298 MLX5_SET(destroy_flow_group_in, in, opcode,
299 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
300 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
301 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
302 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
304 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
305 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
308 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
311 static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
312 struct fs_fte *fte, bool *extended_dest)
314 int fw_log_max_fdb_encap_uplink =
315 MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
316 int num_fwd_destinations = 0;
317 struct mlx5_flow_rule *dst;
320 *extended_dest = false;
321 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
324 list_for_each_entry(dst, &fte->node.children, node.list) {
325 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
327 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
328 dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
330 num_fwd_destinations++;
332 if (num_fwd_destinations > 1 && num_encap > 0)
333 *extended_dest = true;
335 if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
336 mlx5_core_warn(dev, "FW does not support extended destination");
339 if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
340 mlx5_core_warn(dev, "FW does not support more than %d encaps",
341 1 << fw_log_max_fdb_encap_uplink);
347 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
348 int opmod, int modify_mask,
349 struct mlx5_flow_table *ft,
353 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
354 bool extended_dest = false;
355 struct mlx5_flow_rule *dst;
356 void *in_flow_context, *vlan;
357 void *in_match_value;
364 if (mlx5_set_extended_dest(dev, fte, &extended_dest))
368 dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
370 dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
372 inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
373 in = kvzalloc(inlen, GFP_KERNEL);
377 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
378 MLX5_SET(set_fte_in, in, op_mod, opmod);
379 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
380 MLX5_SET(set_fte_in, in, table_type, ft->type);
381 MLX5_SET(set_fte_in, in, table_id, ft->id);
382 MLX5_SET(set_fte_in, in, flow_index, fte->index);
384 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
385 MLX5_SET(set_fte_in, in, other_vport, 1);
388 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
389 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
391 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
392 MLX5_SET(flow_context, in_flow_context, extended_destination,
397 action = fte->action.action &
398 ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
399 MLX5_SET(flow_context, in_flow_context, action, action);
401 MLX5_SET(flow_context, in_flow_context, action,
403 MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
404 fte->action.reformat_id);
406 MLX5_SET(flow_context, in_flow_context, modify_header_id,
407 fte->action.modify_id);
409 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
411 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
412 MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
413 MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
415 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
417 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
418 MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
419 MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
421 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
423 memcpy(in_match_value, &fte->val, sizeof(fte->val));
425 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
426 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
429 list_for_each_entry(dst, &fte->node.children, node.list) {
430 unsigned int id, type = dst->dest_attr.type;
432 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
436 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
437 id = dst->dest_attr.ft_num;
438 type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
440 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
441 id = dst->dest_attr.ft->id;
443 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
444 id = dst->dest_attr.vport.num;
445 MLX5_SET(dest_format_struct, in_dests,
446 destination_eswitch_owner_vhca_id_valid,
447 !!(dst->dest_attr.vport.flags &
448 MLX5_FLOW_DEST_VPORT_VHCA_ID));
449 MLX5_SET(dest_format_struct, in_dests,
450 destination_eswitch_owner_vhca_id,
451 dst->dest_attr.vport.vhca_id);
453 MLX5_SET(dest_format_struct, in_dests,
455 !!(dst->dest_attr.vport.flags &
456 MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
457 MLX5_SET(extended_dest_format, in_dests,
459 dst->dest_attr.vport.reformat_id);
463 id = dst->dest_attr.tir_num;
466 MLX5_SET(dest_format_struct, in_dests, destination_type,
468 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
469 in_dests += dst_cnt_size;
473 MLX5_SET(flow_context, in_flow_context, destination_list_size,
477 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
478 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
479 log_max_flow_counter,
483 list_for_each_entry(dst, &fte->node.children, node.list) {
484 if (dst->dest_attr.type !=
485 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
488 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
489 dst->dest_attr.counter_id);
490 in_dests += dst_cnt_size;
493 if (list_size > max_list_size) {
498 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
502 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
508 static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
509 struct mlx5_flow_table *ft,
510 struct mlx5_flow_group *group,
513 unsigned int group_id = group->id;
515 return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
518 static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
519 struct mlx5_flow_table *ft,
520 unsigned int group_id,
525 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
526 flow_table_properties_nic_receive.
532 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
535 static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
536 struct mlx5_flow_table *ft,
539 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
540 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
542 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
543 MLX5_SET(delete_fte_in, in, table_type, ft->type);
544 MLX5_SET(delete_fte_in, in, table_id, ft->id);
545 MLX5_SET(delete_fte_in, in, flow_index, fte->index);
547 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
548 MLX5_SET(delete_fte_in, in, other_vport, 1);
551 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
554 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
556 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
557 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
560 MLX5_SET(alloc_flow_counter_in, in, opcode,
561 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
563 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
565 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
569 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
571 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
572 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
574 MLX5_SET(dealloc_flow_counter_in, in, opcode,
575 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
576 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
577 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
580 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
581 u64 *packets, u64 *bytes)
583 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
584 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
585 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
589 MLX5_SET(query_flow_counter_in, in, opcode,
590 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
591 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
592 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
593 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
597 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
598 *packets = MLX5_GET64(traffic_counter, stats, packets);
599 *bytes = MLX5_GET64(traffic_counter, stats, octets);
603 struct mlx5_cmd_fc_bulk {
610 struct mlx5_cmd_fc_bulk *
611 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
613 struct mlx5_cmd_fc_bulk *b;
615 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
616 MLX5_ST_SZ_BYTES(traffic_counter) * num;
618 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
629 void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
635 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
637 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
639 MLX5_SET(query_flow_counter_in, in, opcode,
640 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
641 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
642 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
643 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
644 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
647 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
648 struct mlx5_cmd_fc_bulk *b, u32 id,
649 u64 *packets, u64 *bytes)
651 int index = id - b->id;
654 if (index < 0 || index >= b->num) {
655 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
656 id, b->id, b->id + b->num - 1);
660 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
661 flow_statistics[index]);
662 *packets = MLX5_GET64(traffic_counter, stats, packets);
663 *bytes = MLX5_GET64(traffic_counter, stats, octets);
666 int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
670 enum mlx5_flow_namespace_type namespace,
671 u32 *packet_reformat_id)
673 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
674 void *packet_reformat_context_in;
681 if (namespace == MLX5_FLOW_NAMESPACE_FDB)
682 max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
684 max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
686 if (size > max_encap_size) {
687 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
688 size, max_encap_size);
692 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
697 packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
698 in, packet_reformat_context);
699 reformat = MLX5_ADDR_OF(packet_reformat_context_in,
700 packet_reformat_context_in,
702 inlen = reformat - (void *)in + size;
704 memset(in, 0, inlen);
705 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
706 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
707 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
708 reformat_data_size, size);
709 MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
710 reformat_type, reformat_type);
711 memcpy(reformat, reformat_data, size);
713 memset(out, 0, sizeof(out));
714 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
716 *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
717 out, packet_reformat_id);
721 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
723 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
724 u32 packet_reformat_id)
726 u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
727 u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
729 memset(in, 0, sizeof(in));
730 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
731 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
732 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
735 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
737 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
739 int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
740 u8 namespace, u8 num_actions,
741 void *modify_actions, u32 *modify_header_id)
743 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
744 int max_actions, actions_size, inlen, err;
750 case MLX5_FLOW_NAMESPACE_FDB:
751 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
752 table_type = FS_FT_FDB;
754 case MLX5_FLOW_NAMESPACE_KERNEL:
755 case MLX5_FLOW_NAMESPACE_BYPASS:
756 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
757 table_type = FS_FT_NIC_RX;
759 case MLX5_FLOW_NAMESPACE_EGRESS:
760 max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
761 table_type = FS_FT_NIC_TX;
767 if (num_actions > max_actions) {
768 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
769 num_actions, max_actions);
773 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
774 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
776 in = kzalloc(inlen, GFP_KERNEL);
780 MLX5_SET(alloc_modify_header_context_in, in, opcode,
781 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
782 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
783 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
785 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
786 memcpy(actions_in, modify_actions, actions_size);
788 memset(out, 0, sizeof(out));
789 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
791 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
795 EXPORT_SYMBOL(mlx5_modify_header_alloc);
797 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
799 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
800 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
802 memset(in, 0, sizeof(in));
803 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
804 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
805 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
808 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
810 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
812 static const struct mlx5_flow_cmds mlx5_flow_cmds = {
813 .create_flow_table = mlx5_cmd_create_flow_table,
814 .destroy_flow_table = mlx5_cmd_destroy_flow_table,
815 .modify_flow_table = mlx5_cmd_modify_flow_table,
816 .create_flow_group = mlx5_cmd_create_flow_group,
817 .destroy_flow_group = mlx5_cmd_destroy_flow_group,
818 .create_fte = mlx5_cmd_create_fte,
819 .update_fte = mlx5_cmd_update_fte,
820 .delete_fte = mlx5_cmd_delete_fte,
821 .update_root_ft = mlx5_cmd_update_root_ft,
824 static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
825 .create_flow_table = mlx5_cmd_stub_create_flow_table,
826 .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
827 .modify_flow_table = mlx5_cmd_stub_modify_flow_table,
828 .create_flow_group = mlx5_cmd_stub_create_flow_group,
829 .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
830 .create_fte = mlx5_cmd_stub_create_fte,
831 .update_fte = mlx5_cmd_stub_update_fte,
832 .delete_fte = mlx5_cmd_stub_delete_fte,
833 .update_root_ft = mlx5_cmd_stub_update_root_ft,
836 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
838 return &mlx5_flow_cmds;
841 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
843 return &mlx5_flow_cmd_stubs;
846 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
850 case FS_FT_ESW_EGRESS_ACL:
851 case FS_FT_ESW_INGRESS_ACL:
853 case FS_FT_SNIFFER_RX:
854 case FS_FT_SNIFFER_TX:
856 return mlx5_fs_cmd_get_fw_cmds();
858 return mlx5_fs_cmd_get_stub_cmds();