/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue = page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)	/* token 0 is reserved; skip it on wrap */
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}
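
/* Worked example of the arithmetic above: the first sizeof(msg->first.data)
 * = 16 bytes of a message live inline in the command descriptor, so a
 * msg->len of 16 + 1024 needs DIV_ROUND_UP(1024, 512) = 2 mailbox blocks
 * (MLX5_CMD_DATA_BLOCK_SIZE is 512), while any message of up to 16 bytes
 * needs none.
 */
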
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
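
/* The checksum used above is a plain XOR-8: each signature byte is the
 * bitwise NOT of the XOR of the other covered bytes, so XOR-ing the whole
 * covered range (signature byte included) must yield 0xff, which is exactly
 * what verify_block_sig() checks. A self-contained sketch of that invariant,
 * outside any mlx5 structure (illustrative only, not compiled):
 */
#if 0
static u8 example_xor8_sign(u8 *buf, int len)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < len - 1; i++)	/* all bytes except the sig slot */
		sum ^= buf[i];
	buf[len - 1] = ~sum;		/* XOR over all len bytes is now 0xff */
	return buf[len - 1];
}
#endif
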
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

struct mlx5_ifc_mbox_out_bits {
	u8	   status[0x8];
	u8	   reserved_at_8[0x18];

	u8	   syndrome[0x20];

	u8	   reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	   opcode[0x10];
	u8	   uid[0x10];

	u8	   reserved_at_20[0x10];
	u8	   op_mod[0x10];

	u8	   reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}
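
/* Illustrative use (a sketch, not part of the driver): every command output
 * begins with the mbox_out header declared above, so a raw completion buffer
 * can be decoded generically before any command-specific fields are read.
 * The example_ name is hypothetical.
 */
#if 0
static bool example_cmd_failed(void *out)
{
	u8 status;
	u32 syndrome;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (status)
		pr_warn("cmd failed: status 0x%x, syndrome 0x%x\n",
			status, syndrome);
	return status != 0;
}
#endif
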
static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;
	u16 uid;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid    = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_core_err_rl(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode), opcode, op_mod,
			cmd_status_str(status), status, syndrome);
	else
		mlx5_core_dbg(dev,
			      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			      mlx5_command_str(opcode),
			      opcode, op_mod,
			      cmd_status_str(status),
			      status,
			      syndrome);

	return cmd_status_to_err(status);
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);

static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
		return true;

	return cmd->allowed_opcode == opcode;
}

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;
	bool poll_cmd = ent->polling;
	int alloc_ret;
	int cmd_mode;

	complete(&ent->handling);
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = alloc_ent(cmd);
		if (alloc_ret < 0) {
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				free_cmd(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

	/* Skip sending command to fw if internal error */
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
	    cmd->state != MLX5_CMDIF_STATE_UP ||
	    !opcode_allowed(&dev->cmd, ent->op)) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
		/* no doorbell, no need to keep the entry */
		free_ent(cmd, ent->idx);
		if (ent->callback)
			free_cmd(ent);
		return;
	}

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work)) {
		ent->ret = -ECANCELED;
		goto out_err;
	}
	if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
	}

out_err:
	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	} else if (err == -ECANCELED) {
		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/*  Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;
	if (err == -ECANCELED)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < MLX5_CMD_OP_MAX) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}

void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* quiesce the command queue before changing the allowed opcode */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* quiesce the command queue before switching polling/events mode */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_core_dev *dev;
	struct mlx5_cmd *cmd;
	struct mlx5_eqe *eqe;

	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	eqe = data;

	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

	return NOTIFY_OK;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		/* cached message: return it to its cache's free list */
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					free_ent(cmd, ent->idx);
					free_cmd(ent);
				}
				continue;
			}

			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			/* only real completion will free the entry slot */
			if (!forced)
				free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < MLX5_CMD_OP_MAX) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									 ent->in->first.data,
									 ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				if (!forced)
					free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}

void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	unsigned long flags;
	u64 vector;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static int status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct cmd_msg_cache *ch = NULL;
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (in_size <= 16)
		goto cache_miss;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
		}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
		msg->len = in_size;
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
	}

	if (!IS_ERR(msg))
		return msg;

cache_miss:
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
	return msg;
}

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u16 opcode;
	u8 token;

	opcode = MLX5_GET(mbox_in, in, opcode);
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
	    dev->cmd.state != MLX5_CMDIF_STATE_UP ||
	    !opcode_allowed(&dev->cmd, opcode)) {
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
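
/* A minimal usage sketch (illustrative only, not part of this file): callers
 * build command buffers with the MLX5_ST_SZ_DW()/MLX5_SET() helpers from
 * mlx5_ifc.h and pass them to mlx5_cmd_exec(), which blocks until completion.
 * The example_ name is hypothetical; the ENABLE_HCA layout is the one used
 * by the core driver during bring-up.
 */
#if 0
static int example_exec_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	/* Returns 0 on success, or a negative errno derived from the
	 * delivery/command status (see status_to_err()/mlx5_cmd_check()).
	 */
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
#endif
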
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
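
/* An illustrative sketch (not part of this file) of the async pattern the
 * helpers above implement: the context counts in-flight commands so teardown
 * can wait for every callback to finish. All example_ names are hypothetical;
 * the output buffer and work struct must stay alive until the callback runs.
 */
#if 0
struct example_state {
	struct mlx5_async_ctx async_ctx;
	struct mlx5_async_work work;
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
};

static void example_done(int status, struct mlx5_async_work *work)
{
	/* Runs from the command completion path; must not sleep */
	pr_debug("async cmd finished with status %d\n", status);
}

static int example_async(struct mlx5_core_dev *dev, struct example_state *st)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
	int err;

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	mlx5_cmd_init_async_ctx(dev, &st->async_ctx);
	err = mlx5_cmd_exec_cb(&st->async_ctx, in, sizeof(in),
			       st->out, sizeof(st->out),
			       example_done, &st->work);
	/* ... later, once no further commands will be issued on this ctx: */
	mlx5_cmd_cleanup_async_ctx(&st->async_ctx);
	return err;
}
#endif
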
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}

static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

/* 16 bytes fit inline in the descriptor (first.data); the rest spills into
 * MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks, hence the 16 + N * block sizes.
 */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};

static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}

static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = dev->device;

	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	/* not aligned: over-allocate and align the buffer by hand */
	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = dev->device;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
	if (!cmd->stats)
		return -ENOMEM;

	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto dma_pool_err;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);
err_free_page:
	free_cmd_page(dev, cmd);
err_free_pool:
	dma_pool_destroy(cmd->pool);
dma_pool_err:
	kvfree(cmd->stats);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}
EXPORT_SYMBOL(mlx5_cmd_set_state);