/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x1000,
	MLX5_NUM_CMD_EQE	= 32,
	MLX5_NUM_PF_DRAIN	= 64,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
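/* Return the next software-owned EQE, or NULL if the queue is empty.
 * The ownership bit written by hardware is compared against the parity
 * of the consumer index wrap count, so the "valid" polarity flips on
 * every pass over the (power-of-two sized) queue.
 */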
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	case MLX5_EVENT_TYPE_PPS_EVENT:
		return "MLX5_EVENT_TYPE_PPS_EVENT";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
	default:
		return "Unrecognized event";
	}
}
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}
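/* Write the EQ doorbell: bits 31:24 carry the EQ number and bits 23:0 the
 * consumer index.  With arm == 1 the EQ is re-armed to generate another
 * interrupt; with arm == 0 the doorbell only acknowledges consumed EQEs.
 */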
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void eqe_pf_action(struct work_struct *work)
{
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
						     work);
	struct mlx5_eq *eq = pfault->eq;

	mlx5_core_page_fault(eq->dev, pfault);
	mempool_free(pfault, eq->pf_ctx.pool);
}
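/* Drain page fault EQEs: copy each event into a mlx5_pagefault taken from
 * the pre-allocated mempool and hand it to the per-EQ ordered workqueue,
 * where eqe_pf_action() reports it to the page fault handler.
 */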
static void eq_pf_process(struct mlx5_eq *eq)
{
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->pf_ctx.work);
			break;
		}

		dma_rmb();
		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_core_dbg(dev,
			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			      eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				0xffffff;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				      pfault->type, pfault->token,
				      pfault->rdma.r_key);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				      pfault->rdma.rdma_op_len,
				      pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				0xffffff;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_core_dbg(dev,
				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				      pfault->type, pfault->token,
				      pfault->wqe.wq_num,
				      pfault->wqe.wqe_index);
			break;

		default:
			mlx5_core_warn(dev,
				       "Unsupported page fault event sub-type: 0x%02hhx\n",
				       eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, eqe_pf_action);
		queue_work(eq->pf_ctx.wq, &pfault->work);

		++eq->cons_index;
		++set_ci;

		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);
}
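/* Page fault EQ interrupt handler: process the EQ inline when the context
 * lock is free, otherwise defer to eq_pf_action() via the work item.
 */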
static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
		eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
	} else {
		schedule_work(&eq->pf_ctx.work);
	}

	return IRQ_HANDLED;
}
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 * Cheap workaround.
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}
static void eq_pf_action(struct work_struct *work)
{
	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);

	mempool_refill(eq->pf_ctx.pool);

	spin_lock_irq(&eq->pf_ctx.lock);
	eq_pf_process(eq);
	spin_unlock_irq(&eq->pf_ctx.lock);
}
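/* Set up the page fault context: the serializing lock, the drain work item,
 * an ordered workqueue for deferred fault reporting and a mempool of
 * MLX5_NUM_PF_DRAIN pagefault descriptors usable from atomic context.
 */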
static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
{
	spin_lock_init(&pf_ctx->lock);
	INIT_WORK(&pf_ctx->work, eq_pf_action);

	pf_ctx->wq = alloc_ordered_workqueue(name,
					     WQ_MEM_RECLAIM);
	if (!pf_ctx->wq)
		return -ENOMEM;

	pf_ctx->pool = mempool_create_kmalloc_pool
		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
	if (!pf_ctx->pool)
		goto err_wq;

	return 0;
err_wq:
	destroy_workqueue(pf_ctx->wq);
	return -ENOMEM;
}
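/* Tell the device to resume the WQ/QP that triggered an ODP page fault,
 * identified by token/wq_num/type.  A non-zero @error reports that the
 * fault could not be resolved.
 */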
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error)
{
	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};

	MLX5_SET(page_fault_resume_in, in, opcode,
		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, error, !!error);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, token, token);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
static void general_event_handler(struct mlx5_core_dev *dev,
				  struct mlx5_eqe *eqe)
{
	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (dev->event)
			dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
		break;
	default:
		mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
			      eqe->sub_type);
	}
}
static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
				    struct mlx5_eqe *eqe)
{
	u64 value_lsb;
	u64 value_msb;

	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

	mlx5_core_warn(dev,
		       "High temperature on sensors with bit set %llx %llx",
		       value_msb, value_lsb);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	spin_unlock(&table->lock);

	return cq;
}
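/* Completion event: bump the arm sequence number and invoke the CQ's
 * completion callback, then drop the reference taken by mlx5_eq_cq_get().
 */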
static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	mlx5_cq_put(cq);
}

static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
{
	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);

	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	mlx5_cq_put(cq);
}
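/* Main EQ interrupt handler: poll hardware-owned EQEs, dispatch them by
 * event type, and periodically update the consumer index so the EQ cannot
 * overflow.  The EQ is re-armed once the queue has been drained.
 */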
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;
	struct mlx5_eqe *eqe;
	int set_ci = 0;
	u32 cqn = -1;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_eq_cq_completion(eq, cqn);
			break;
		case MLX5_EVENT_TYPE_DCT_DRAINED:
			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
			rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;
		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_eq_cq_event(eq, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
			break;

		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_PPS_EVENT:
			mlx5_pps_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;

		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
			mlx5_temp_warning_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_GENERAL_EVENT:
			general_event_handler(dev, eqe);
			break;
		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq->tasklet_ctx.task);

	return IRQ_HANDLED;
}
/* Some architectures don't latch interrupts when they are disabled, so using
 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 * avoid losing them.  It is not recommended to use it, unless this is the last
 * resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
{
	u32 count_eqe;

	disable_irq(eq->irqn);
	count_eqe = eq->cons_index;
	mlx5_eq_int(eq->irqn, eq);
	count_eqe = eq->cons_index - count_eqe;
	enable_irq(eq->irqn);

	return count_eqe;
}
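/* Give every EQE to hardware by writing the initial ownership value. */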
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
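/* Create an EQ and map it to an interrupt vector: allocate and initialize
 * the EQE buffer, issue CREATE_EQ with the requested event bitmask, request
 * the IRQ, add a debugfs entry, and set up either the completion tasklet or
 * the page fault context before arming the EQ.
 */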
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type)
{
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	irq_handler_t handler;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->type = type;
	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
	if (err)
		return err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF)
		handler = mlx5_eq_pf_int;
	else
#endif
		handler = mlx5_eq_int;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(eq->irqn, handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (type == MLX5_EQ_TYPE_PF) {
		err = init_pf_ctx(&eq->pf_ctx, name);
		if (err)
			goto err_irq;
	} else
#endif
	{
		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
			     (unsigned long)&eq->tasklet_ctx);
	}

	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(eq->irqn, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
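/* Tear down an EQ in the reverse order of creation: remove the debugfs
 * entry, free the IRQ, issue DESTROY_EQ, quiesce any handler still running,
 * release the tasklet or page fault resources, and free the EQE buffer.
 */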
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(eq->irqn, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	if (eq->type == MLX5_EQ_TYPE_COMP) {
		tasklet_disable(&eq->tasklet_ctx.task);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	} else if (eq->type == MLX5_EQ_TYPE_PF) {
		cancel_work_sync(&eq->pf_ctx.work);
		destroy_workqueue(eq->pf_ctx.wq);
		mempool_destroy(eq->pf_ctx.pool);
#endif
	}
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);

	return err;
}
int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);

	if (!tmp) {
		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			       cq->cqn, eq->eqn);
		return -ENOENT;
	}

	if (tmp != cq) {
		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			       cq->cqn, eq->eqn);
		return -EINVAL;
	}

	return 0;
}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}
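/* Create the control-path EQs (command, async, pages and, when on-demand
 * paging is supported, page fault), building the async event mask from the
 * device capabilities, and switch the command interface to event mode.
 */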
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 MLX5_EQ_TYPE_ASYNC);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_create_map_eq(dev, &table->pfault_eq,
					 MLX5_EQ_VEC_PFAULT,
					 MLX5_NUM_ASYNC_EQE,
					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
					 "mlx5_page_fault_eq",
					 MLX5_EQ_TYPE_PF);
		if (err) {
			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
				       err);
			goto err3;
		}
	}

	return err;
err3:
	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
#else
	return err;
#endif

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}
void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
		if (err)
			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
				      err);
	}
#endif

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
			      err);

	err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
			      err);

	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
			      err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;

#ifdef CONFIG_RFS_ACCEL
	if (dev->rmap) {
		free_irq_cpu_rmap(dev->rmap);
		dev->rmap = NULL;
	}
#endif
	list_for_each_entry(eq, &table->comp_eqs_list, list)
		free_irq(eq->irqn, eq);

	free_irq(table->pages_eq.irqn, &table->pages_eq);
	free_irq(table->async_eq.irqn, &table->async_eq);
	free_irq(table->cmd_eq.irqn, &table->cmd_eq);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(dev, pg))
		free_irq(table->pfault_eq.irqn, &table->pfault_eq);
#endif
	pci_free_irq_vectors(dev->pdev);
}