/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"	/* for mlx5_eswitch_vport_event() below */
enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};
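/* A note on MLX5_NUM_SPARE_EQE: it is headroom added on top of the requested
 * entry count when an EQ is sized (see mlx5_create_map_eq() below).  The
 * spare entries let the interrupt handlers batch consumer-index doorbells
 * instead of ringing one per event; the comment in mlx5_eq_int() explains
 * why the index must still be updated at least every MLX5_NUM_SPARE_EQE
 * events.
 */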
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)          | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
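/* How the ownership test above works, in short: the EQ is a power-of-two
 * ring, so bit (nent) of cons_index flips each time the driver wraps around
 * the ring, and the hardware flips the owner bit of the EQEs it writes on
 * each pass.  An entry is valid only while the EQE's owner parity matches
 * the driver's current pass parity; entries are initialised to
 * MLX5_EQE_OWNER_INIT_VAL in init_eq_buf() so they read as not-yet-written
 * on the first pass, and next_eqe_sw() returns NULL for them.
 */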
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	default:
		return "Unrecognized event";
	}
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
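/* Doorbell layout, as implied by the arithmetic above: the low 24 bits of
 * the written value carry the consumer index and the high 8 bits the EQ
 * number; the "arm" register sits at the doorbell base and the
 * update-without-arm register two __be32 slots (8 bytes) above it.  Passing
 * arm = 0 acknowledges events without re-enabling interrupts for the EQ.
 */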
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}
static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);
}
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}
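/* The trylock above is the interesting part of this handler: if
 * eq_pf_action() already holds pf_ctx.lock (for example because it is
 * draining the EQ after the mempool ran dry), the hard IRQ does not spin in
 * interrupt context; it simply re-kicks the work item and lets process
 * context finish walking the queue.
 */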
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}
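/* Why the alloc/free pair refills the pool: mempool_alloc(GFP_KERNEL) can
 * satisfy the request from the underlying kmalloc allocator, and
 * mempool_free() parks an element back in the reserve whenever curr_nr is
 * below min_nr rather than returning it to the slab.  Each iteration thus
 * converts one fresh allocation into one reserved element until the pool
 * is full again.
 */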
static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}
static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
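/* A sketch of the caller's side (the real consumer is the mlx5_ib ODP
 * code): once the handler queued by eq_pf_process() has faulted the pages
 * in, it reports the outcome back to the HCA so the stalled operation can
 * make progress, along the lines of:
 *
 *	mlx5_core_page_fault_resume(dev, pfault->token, pfault->wqe.wq_num,
 *				    pfault->type, 0);
 */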
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			if (dev->event)
				dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}
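/* Note on the completion path: the handler only records whether any
 * MLX5_EVENT_TYPE_COMP event arrived (cqn != -1) and schedules the EQ
 * tasklet at most once per interrupt, after the consumer index has been
 * updated and the EQ re-armed.  Work that CQ handlers defer onto
 * eq->tasklet_ctx.list is therefore batched per interrupt rather than
 * handled per EQE.
 */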
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
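/* Initialising every owner byte to MLX5_EQE_OWNER_INIT_VAL (1) makes all
 * entries look hardware-untouched on the driver's first pass through the
 * ring, so next_eqe_sw() reports no valid EQE until the HCA has actually
 * written one.
 */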
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = priv->msix_arr[vecidx].vector;
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
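/* Sketch of a typical call, mirroring what mlx5_start_eqs() does below (the
 * vector index, size and name here are illustrative, not prescriptive):
 *
 *	struct mlx5_eq_table *table = &dev->priv.eq_table;
 *	int err;
 *
 *	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 *				 MLX5_NUM_CMD_EQE,
 *				 1ull << MLX5_EVENT_TYPE_CMD,
 *				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
 *
 * On success the EQ comes back armed, mapped to the given MSI-X vector,
 * and delivering only the event types named in the mask.
 */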
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
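/* Teardown ordering above is deliberate: the IRQ is freed before the
 * firmware EQ object is destroyed so no new interrupts can arrive for a
 * dead queue, and synchronize_irq() then makes sure any handler still
 * running on another CPU has finished before the tasklet/workqueue state
 * and the EQ buffer are released.
 */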
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
{
	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
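/* The bring-up order above is deliberate: the command EQ must exist before
 * mlx5_cmd_use_events() flips the command interface from polling to
 * event-driven completions, and the unwind path (err1) flips it back with
 * mlx5_cmd_use_polling() before destroying that EQ.
 */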
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			return err;
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
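/* Hypothetical usage sketch (the EQ debugfs code is an in-tree caller): the
 * output buffer must be sized for the QUERY_EQ output layout, after which
 * EQ context fields can be read back, e.g.:
 *
 *	u32 out[MLX5_ST_SZ_DW(query_eq_out)] = {0};
 *	void *eqc;
 *	int err;
 *
 *	err = mlx5_core_eq_query(dev, eq, out, sizeof(out));
 *	if (!err) {
 *		eqc = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
 *		log_sz = MLX5_GET(eqc, eqc, log_eq_size);
 *	}
 */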