/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "diag/fw_tracer.h"
enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x1000,
        MLX5_NUM_CMD_EQE        = 32,
        MLX5_NUM_PF_DRAIN       = 64,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
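
/* An EQE is valid once its ownership bit matches the software pass over the
 * queue: on even passes a valid entry has owner == 0, on odd passes owner == 1.
 * next_eqe_sw() returns the entry at the current consumer index if hardware
 * has written it, or NULL if it is still owned by hardware.
 */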
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
                return "MLX5_EVENT_TYPE_PAGE_FAULT";
        case MLX5_EVENT_TYPE_PPS_EVENT:
                return "MLX5_EVENT_TYPE_PPS_EVENT";
        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
        case MLX5_EVENT_TYPE_FPGA_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_ERROR";
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
        case MLX5_EVENT_TYPE_GENERAL_EVENT:
                return "MLX5_EVENT_TYPE_GENERAL_EVENT";
        case MLX5_EVENT_TYPE_DEVICE_TRACER:
                return "MLX5_EVENT_TYPE_DEVICE_TRACER";
        default:
                return "Unrecognized event";
        }
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}
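
/* Ring the EQ doorbell with the 24-bit consumer index tagged with the EQ
 * number. Writing the first doorbell register (arm != 0) also re-arms the EQ
 * so it generates another interrupt; the second register only updates the
 * consumer index.
 */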
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
        struct mlx5_pagefault *pfault = container_of(work,
                                                     struct mlx5_pagefault,
                                                     work);
        struct mlx5_eq *eq = pfault->eq;

        mlx5_core_page_fault(eq->dev, pfault);
        mempool_free(pfault, eq->pf_ctx.pool);
}
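
/* Drain the page-fault EQ: for every new EQE, copy the fault parameters into
 * a mlx5_pagefault taken from the pre-allocated mempool and hand it to the
 * page-fault workqueue, so the (potentially sleeping) fault resolution runs
 * outside the interrupt path.
 */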
static void eq_pf_process(struct mlx5_eq *eq)
{
        struct mlx5_core_dev *dev = eq->dev;
        struct mlx5_eqe_page_fault *pf_eqe;
        struct mlx5_pagefault *pfault;
        struct mlx5_eqe *eqe;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
                if (!pfault) {
                        schedule_work(&eq->pf_ctx.work);
                        break;
                }

                dma_rmb();
                pf_eqe = &eqe->data.page_fault;
                pfault->event_subtype = eqe->sub_type;
                pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

                mlx5_core_dbg(dev,
                              "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
                              eqe->sub_type, pfault->bytes_committed);

                switch (eqe->sub_type) {
                case MLX5_PFAULT_SUBTYPE_RDMA:
                        /* RDMA based event */
                        pfault->type =
                                be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
                        pfault->token =
                                be32_to_cpu(pf_eqe->rdma.pftype_token) &
                                MLX5_24BIT_MASK;
                        pfault->rdma.r_key =
                                be32_to_cpu(pf_eqe->rdma.r_key);
                        pfault->rdma.packet_size =
                                be16_to_cpu(pf_eqe->rdma.packet_length);
                        pfault->rdma.rdma_op_len =
                                be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                        pfault->rdma.rdma_va =
                                be64_to_cpu(pf_eqe->rdma.rdma_va);
                        mlx5_core_dbg(dev,
                                      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
                                      pfault->type, pfault->token,
                                      pfault->rdma.r_key);
                        mlx5_core_dbg(dev,
                                      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
                                      pfault->rdma.rdma_op_len,
                                      pfault->rdma.rdma_va);
                        break;

                case MLX5_PFAULT_SUBTYPE_WQE:
                        /* WQE based event */
                        pfault->type =
                                be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
                        pfault->token =
                                be32_to_cpu(pf_eqe->wqe.token);
                        pfault->wqe.wq_num =
                                be32_to_cpu(pf_eqe->wqe.pftype_wq) &
                                MLX5_24BIT_MASK;
                        pfault->wqe.wqe_index =
                                be16_to_cpu(pf_eqe->wqe.wqe_index);
                        pfault->wqe.packet_size =
                                be16_to_cpu(pf_eqe->wqe.packet_length);
                        mlx5_core_dbg(dev,
                                      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
                                      pfault->type, pfault->token,
                                      pfault->wqe.wq_num,
                                      pfault->wqe.wqe_index);
                        break;

                default:
                        mlx5_core_warn(dev,
                                       "Unsupported page fault event sub-type: 0x%02hhx\n",
                                       eqe->sub_type);
                        /* Unsupported page faults should still be
                         * resolved by the page fault handler
                         */
                }

                pfault->eq = eq;
                INIT_WORK(&pfault->work, eqe_pf_action);
                queue_work(eq->pf_ctx.wq, &pfault->work);

                ++eq->cons_index;
                ++set_ci;

                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);
}
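
/* Page-fault EQ interrupt handler: process the EQ directly when the context
 * lock is free, otherwise defer to eq_pf_action() via the work item so two
 * contexts never walk the queue concurrently.
 */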
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        unsigned long flags;

        if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
                eq_pf_process(eq);
                spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
        } else {
                schedule_work(&eq->pf_ctx.work);
        }

        return IRQ_HANDLED;
}
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
{
        while (pool->curr_nr < pool->min_nr)
                mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}
static void eq_pf_action(struct work_struct *work)
{
        struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

        mempool_refill(eq->pf_ctx.pool);

        spin_lock_irq(&eq->pf_ctx.lock);
        eq_pf_process(eq);
        spin_unlock_irq(&eq->pf_ctx.lock);
}
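
/* Set up the per-EQ page-fault context: an ordered workqueue for deferred
 * processing and a mempool of MLX5_NUM_PF_DRAIN pre-allocated mlx5_pagefault
 * descriptors so faults can be queued from atomic context without allocating.
 */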
static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
        spin_lock_init(&pf_ctx->lock);
        INIT_WORK(&pf_ctx->work, eq_pf_action);

        pf_ctx->wq = alloc_ordered_workqueue(name,
                                             WQ_MEM_RECLAIM);
        if (!pf_ctx->wq)
                return -ENOMEM;

        pf_ctx->pool = mempool_create_kmalloc_pool
                (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
        if (!pf_ctx->pool)
                goto err_wq;

        return 0;
err_wq:
        destroy_workqueue(pf_ctx->wq);
        return -ENOMEM;
}
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
                                u32 wq_num, u8 type, int error)
{
        u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

        MLX5_SET(page_fault_resume_in, in, opcode,
                 MLX5_CMD_OP_PAGE_FAULT_RESUME);
        MLX5_SET(page_fault_resume_in, in, error, !!error);
        MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
        MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
        MLX5_SET(page_fault_resume_in, in, token, token);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
static void general_event_handler(struct mlx5_core_dev *dev,
                                  struct mlx5_eqe *eqe)
{
        switch (eqe->sub_type) {
        case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
                if (dev->event)
                        dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
                break;
        default:
                mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
                              eqe->sub_type);
        }
}
static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
                                    struct mlx5_eqe *eqe)
{
        u64 value_lsb;
        u64 value_msb;

        value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
        value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

        mlx5_core_warn(dev,
                       "High temperature on sensors with bit set %llx %llx",
                       value_msb, value_lsb);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *cq = NULL;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                mlx5_cq_hold(cq);
        spin_unlock(&table->lock);

        return cq;
}
static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
{
        struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
                return;
        }

        ++cq->arm_sn;
        cq->comp(cq);
        mlx5_cq_put(cq);
}
static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
        struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

        if (unlikely(!cq)) {
                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
                return;
        }

        cq->event(cq, event_type);

        mlx5_cq_put(cq);
}
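
/* Main EQ interrupt handler (completion and async EQs): walk all new EQEs,
 * dispatch each event type to its handler, periodically update the consumer
 * index so the HCA does not see an overflow, and finally re-arm the EQ.
 */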
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;
        struct mlx5_eqe *eqe;
        int set_ci = 0;
        u32 cqn = -1;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_eq_cq_completion(eq, cqn);
                        break;
                case MLX5_EVENT_TYPE_DCT_DRAINED:
                        rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
                        rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;
                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                                   (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_eq_cq_event(eq, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                              func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
                        break;

                case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
                        mlx5_port_module_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_PPS_EVENT:
                        mlx5_pps_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_FPGA_ERROR:
                case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                        mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
                        break;

                case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                        mlx5_temp_warning_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_GENERAL_EVENT:
                        general_event_handler(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_DEVICE_TRACER:
                        mlx5_fw_tracer_event(dev, eqe);
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        if (cqn != -1)
                tasklet_schedule(&eq->tasklet_ctx.task);

        return IRQ_HANDLED;
}
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
{
        u32 count_eqe;

        disable_irq(eq->irqn);
        count_eqe = eq->cons_index;
        mlx5_eq_int(eq->irqn, eq);
        count_eqe = eq->cons_index - count_eqe;
        enable_irq(eq->irqn);

        return count_eqe;
}
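
/* Initialize every EQE's ownership bit to the "hardware owns it" value so
 * next_eqe_sw() treats the freshly allocated buffer as empty on the first
 * pass over the queue.
 */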
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}
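
/* Create an EQ and map it to an interrupt vector: allocate and initialize the
 * EQE buffer, issue CREATE_EQ with the requested event mask and size, request
 * the IRQ, and set up either the CQ-completion tasklet or, for page-fault EQs,
 * the page-fault context.
 */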
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name,
                       enum mlx5_eq_type type)
{
        struct mlx5_cq_table *cq_table = &eq->cq_table;
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        irq_handler_t handler;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        /* Init CQ table */
        memset(cq_table, 0, sizeof(*cq_table));
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        eq->type = type;
        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (type == MLX5_EQ_TYPE_PF)
                handler = mlx5_eq_pf_int;
        else
#endif
                handler = mlx5_eq_int;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (type == MLX5_EQ_TYPE_PF) {
                err = init_pf_ctx(&eq->pf_ctx, name);
                if (err)
                        goto err_irq;
        } else
#endif
        {
                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
                spin_lock_init(&eq->tasklet_ctx.lock);
                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
                             (unsigned long)&eq->tasklet_ctx);
        }

        /* EQs are created in ARMED state
         */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(eq->irqn, eq);
err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
        kvfree(in);
err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);
        free_irq(eq->irqn, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);

        if (eq->type == MLX5_EQ_TYPE_COMP) {
                tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        } else if (eq->type == MLX5_EQ_TYPE_PF) {
                cancel_work_sync(&eq->pf_ctx.work);
                destroy_workqueue(eq->pf_ctx.wq);
                mempool_destroy(eq->pf_ctx.pool);
#endif
        }
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        int err;

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock_irq(&table->lock);

        return err;
}
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &eq->cq_table;
        struct mlx5_core_cq *tmp;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock_irq(&table->lock);

        if (!tmp) {
                mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
                return -ENOENT;
        }

        if (tmp != cq) {
                mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
                return -EINVAL;
        }

        return 0;
}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = mlx5_eq_debugfs_init(dev);

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
}
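
/* Create the core EQs at driver start: the command EQ (so commands can switch
 * from polling to event mode), the async EQ with an event mask built from the
 * device capabilities, the pages EQ for page-request events, and, when
 * on-demand paging is supported, the page fault EQ.
 */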
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_VPORT_MANAGER(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");

        if (MLX5_PPS_CAP(dev))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
        if (MLX5_CAP_GEN_MAX(dev, dct))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
                                 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_create_map_eq(dev, &table->pfault_eq,
                                         MLX5_EQ_VEC_PFAULT,
                                         MLX5_NUM_ASYNC_EQE,
                                         1 << MLX5_EVENT_TYPE_PAGE_FAULT,
                                         "mlx5_page_fault_eq",
                                         MLX5_EQ_TYPE_PF);
                if (err) {
                        mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
                                       err);
                        goto err3;
                }
        }

        return err;
err3:
        mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
        return err;
#endif

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}
void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
                if (err)
                        mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
                                      err);
        }
#endif

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
                              err);

        err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
                              err);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
                              err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq;

#ifdef CONFIG_RFS_ACCEL
        if (dev->rmap) {
                free_irq_cpu_rmap(dev->rmap);
                dev->rmap = NULL;
        }
#endif
        list_for_each_entry(eq, &table->comp_eqs_list, list)
                free_irq(eq->irqn, eq);

        free_irq(table->pages_eq.irqn, &table->pages_eq);
        free_irq(table->async_eq.irqn, &table->async_eq);
        free_irq(table->cmd_eq.irqn, &table->cmd_eq);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(dev, pg))
                free_irq(table->pfault_eq.irqn, &table->pfault_eq);
#endif
        pci_free_irq_vectors(dev->pdev);
}