// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
        iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
        ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
enum mlxsw_pci_queue_type {
        MLXSW_PCI_QUEUE_TYPE_SDQ,
        MLXSW_PCI_QUEUE_TYPE_RDQ,
        MLXSW_PCI_QUEUE_TYPE_CQ,
        MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT      4
static const u16 mlxsw_pci_doorbell_type_offset[] = {
        MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
        MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
        MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
        MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
        0, /* unused, no arm doorbell for SDQs */
        0, /* unused, no arm doorbell for RDQs */
        MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
        MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
struct mlxsw_pci_mem_item {
        char *buf;
        dma_addr_t mapaddr;
        size_t size;
};
struct mlxsw_pci_queue_elem_info {
        char *elem; /* pointer to actual dma mapped element mem chunk */
        union {
                struct {
                        struct sk_buff *skb;
                } sdq;
                struct {
                        struct sk_buff *skb;
                } rdq;
        } u;
};
struct mlxsw_pci_queue {
        spinlock_t lock; /* for queue accesses */
        struct mlxsw_pci_mem_item mem_item;
        struct mlxsw_pci_queue_elem_info *elem_info;
        u16 producer_counter;
        u16 consumer_counter;
        u16 count; /* number of elements in queue */
        u8 num; /* queue number */
        u8 elem_size; /* size of one element */
        enum mlxsw_pci_queue_type type;
        struct tasklet_struct tasklet; /* queue processing tasklet */
        struct mlxsw_pci *pci;
        union {
                struct {
                        u32 comp_sdq_count;
                        u32 comp_rdq_count;
                        enum mlxsw_pci_cqe_v v;
                } cq;
                struct {
                        u32 ev_cmd_count;
                        u32 ev_comp_count;
                        u32 ev_other_count;
                } eq;
        } u;
};
struct mlxsw_pci_queue_type_group {
        struct mlxsw_pci_queue *q;
        u8 count; /* number of queues in group */
};
struct mlxsw_pci {
        struct pci_dev *pdev;
        u8 __iomem *hw_addr;
        struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
        u32 doorbell_offset;
        struct mlxsw_core *core;
        struct {
                struct mlxsw_pci_mem_item *items;
                unsigned int count;
        } fw_area;
        struct {
                struct mlxsw_pci_mem_item out_mbox;
                struct mlxsw_pci_mem_item in_mbox;
                struct mutex lock; /* Lock access to command registers */
                bool nopoll;
                wait_queue_head_t wait;
                bool wait_done;
                struct {
                        u8 status;
                        u64 out_param;
                } comp;
        } cmd;
        struct mlxsw_bus_info bus_info;
        const struct pci_device_id *id;
        enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
        u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
        tasklet_schedule(&q->tasklet);
}
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
                                        size_t elem_size, int elem_index)
{
        return q->mem_item.buf + (elem_size * elem_index);
}
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
        return &q->elem_info[elem_index];
}
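/* The producer and consumer counters are free-running u16s that are only
 * reduced modulo the (power of two) queue size on access. Their difference
 * is therefore the current fill level, which makes full/empty detection
 * unambiguous even after the counters wrap around.
 */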
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
        int index = q->producer_counter & (q->count - 1);

        if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
                return NULL; /* queue is full */
        return mlxsw_pci_queue_elem_info_get(q, index);
}
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
        int index = q->consumer_counter & (q->count - 1);

        return mlxsw_pci_queue_elem_info_get(q, index);
}
static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
        return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}
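/* Each element carries an owner bit that the hardware toggles on every pass
 * over the ring. Since the queue size is a power of two, the expression
 * (consumer_counter & count) flips between 0 and count on each full lap, so
 * comparing it against the element's owner bit tells whether the element
 * still belongs to the hardware or has been handed back to the driver.
 */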
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
        return owner_bit != !!(q->consumer_counter & q->count);
}
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
                               enum mlxsw_pci_queue_type q_type)
{
        return &mlxsw_pci->queues[q_type];
}
static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
                                  enum mlxsw_pci_queue_type q_type)
{
        struct mlxsw_pci_queue_type_group *queue_group;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
        return queue_group->count;
}
static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
        return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
                      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
        return &mlxsw_pci->queues[q_type].q[q_num];
}
static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
                                                 u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci,
                                     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
                                                 u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci,
                                     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
                                                u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
                                                u8 q_num)
{
        return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
                                           struct mlxsw_pci_queue *q,
                                           u16 val)
{
        mlxsw_pci_write32(mlxsw_pci,
                          DOORBELL(mlxsw_pci->doorbell_offset,
                                   mlxsw_pci_doorbell_type_offset[q->type],
                                   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
                                               struct mlxsw_pci_queue *q,
                                               u16 val)
{
        mlxsw_pci_write32(mlxsw_pci,
                          DOORBELL(mlxsw_pci->doorbell_offset,
                                   mlxsw_pci_doorbell_arm_type_offset[q->type],
                                   q->num), val);
}
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
                                                   struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}
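/* Note that the consumer doorbell reports consumer_counter + count rather
 * than the raw counter; presumably this is what lets the device tell an
 * empty ring apart from a full one when producer and consumer are equal.
 */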
static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
                                                   struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
                                       q->consumer_counter + q->count);
}
static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
                                           struct mlxsw_pci_queue *q)
{
        wmb(); /* ensure all writes are done before we ring a bell */
        __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
                                             int page_index)
{
        return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                              struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->producer_counter = 0;
        q->consumer_counter = 0;

        /* Bind this SDQ to the CQ with the same number. */
        mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
        mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
        }

        err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
        return 0;
}
static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
                               struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                  int index, char *frag_data, size_t frag_len,
                                  int direction)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        dma_addr_t mapaddr;

        mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
        if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
                dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
        mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
        return 0;
}
static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
                                     int index, int direction)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
        dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

        if (!frag_len)
                return;
        pci_unmap_single(pdev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
                                   struct mlxsw_pci_queue_elem_info *elem_info)
{
        size_t buf_len = MLXSW_PORT_MAX_MTU;
        char *wqe = elem_info->elem;
        struct sk_buff *skb;
        int err;

        elem_info->u.rdq.skb = NULL;
        skb = netdev_alloc_skb_ip_align(NULL, buf_len);
        if (!skb)
                return -ENOMEM;

        /* Assume that wqe was previously zeroed. */

        err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
                                     buf_len, DMA_FROM_DEVICE);
        if (err)
                goto err_frag_map;

        elem_info->u.rdq.skb = skb;
        return 0;

err_frag_map:
        dev_kfree_skb_any(skb);
        return err;
}
static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
                                   struct mlxsw_pci_queue_elem_info *elem_info)
{
        struct sk_buff *skb;
        char *wqe;

        skb = elem_info->u.rdq.skb;
        wqe = elem_info->elem;

        mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
}
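/* RDQ bring-up differs from the SDQ case in that every element is
 * pre-filled with a freshly mapped skb and handed to the hardware right
 * away, so the device always has receive buffers to scatter packets into.
 */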
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                              struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
        int i;
        int err;

        q->producer_counter = 0;
        q->consumer_counter = 0;

        /* Bind this RDQ to CQ number sdq_count + q->num; the lower-numbered
         * CQs are assigned to the SDQs.
         */
        mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
        }

        err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;

        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

        for (i = 0; i < q->count; i++) {
                elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
                BUG_ON(!elem_info);
                err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
                if (err)
                        goto rollback;
                /* Everything is set up, ring doorbell to pass elem to HW */
                q->producer_counter++;
                mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
        }

        return 0;

rollback:
        for (i--; i >= 0; i--) {
                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
        }
        mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

        return err;
}
static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
                               struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        int i;

        mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
        for (i = 0; i < q->count; i++) {
                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
        }
}
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
                                  struct mlxsw_pci_queue *q)
{
        q->u.cq.v = mlxsw_pci->max_cqe_ver;

        /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
        if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
            q->num < mlxsw_pci->num_sdq_cqs)
                q->u.cq.v = MLXSW_PCI_CQE_V1;
}
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                             struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->consumer_counter = 0;

        for (i = 0; i < q->count; i++) {
                char *elem = mlxsw_pci_queue_elem_get(q, i);

                mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
        }

        if (q->u.cq.v == MLXSW_PCI_CQE_V1)
                mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
                                MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
        else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
                mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
                                MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

        mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
        mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
        mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
        }
        err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
        mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        return 0;
}
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
                              struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
                                     char *cqe)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_elem_info *elem_info;
        struct sk_buff *skb;
        char *wqe;
        int i;

        spin_lock(&q->lock);
        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        skb = elem_info->u.sdq.skb;
        wqe = elem_info->elem;
        for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
                mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        elem_info->u.sdq.skb = NULL;

        if (q->consumer_counter++ != consumer_counter_limit)
                dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
        spin_unlock(&q->lock);
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
                                     struct mlxsw_pci_queue *q,
                                     u16 consumer_counter_limit,
                                     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *wqe;
        struct sk_buff *skb;
        struct mlxsw_rx_info rx_info;
        u16 byte_count;
        int err;

        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        skb = elem_info->u.rdq.skb;
        if (!skb)
                return;
        wqe = elem_info->elem;
        mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

        if (q->consumer_counter++ != consumer_counter_limit)
                dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

        if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
                rx_info.is_lag = true;
                rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
                rx_info.lag_port_index =
                        mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
        } else {
                rx_info.is_lag = false;
                rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
        }

        rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

        byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
        if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
                byte_count -= ETH_FCS_LEN;
        skb_put(skb, byte_count);
        mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

        memset(wqe, 0, q->elem_size);
        err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
        if (err)
                dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
        /* Everything is set up, ring doorbell to pass elem to HW */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *elem;
        bool owner_bit;

        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        elem = elem_info->elem;
        owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
        if (mlxsw_pci_elem_hw_owned(q, owner_bit))
                return NULL;
        q->consumer_counter++;
        rmb(); /* make sure we read owned bit before the rest of elem */
        return elem;
}
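/* The CQ tasklet copies each CQE to a stack buffer (ncqe) before ringing
 * the consumer doorbell, so the hardware may reuse the ring slot while the
 * completion is still being processed. Processing is capped at half the
 * queue size worth of "credits" per run to bound tasklet latency; re-arming
 * the doorbell afterwards requests a fresh interrupt for any leftovers.
 */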
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
        char *cqe;
        int items = 0;
        int credits = q->count >> 1;

        while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
                u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
                u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
                u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
                char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

                memcpy(ncqe, cqe, q->elem_size);
                mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

                if (sendq) {
                        struct mlxsw_pci_queue *sdq;

                        sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
                        mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
                                                 wqe_counter, ncqe);
                        q->u.cq.comp_sdq_count++;
                } else {
                        struct mlxsw_pci_queue *rdq;

                        rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
                        mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
                                                 wqe_counter, q->u.cq.v, ncqe);
                        q->u.cq.comp_rdq_count++;
                }
                if (++items == credits)
                        break;
        }
        if (items)
                mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
        return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
                                               MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
        return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
                                               MLXSW_PCI_CQE01_SIZE;
}
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                             struct mlxsw_pci_queue *q)
{
        int i;
        int err;

        q->consumer_counter = 0;

        for (i = 0; i < q->count; i++) {
                char *elem = mlxsw_pci_queue_elem_get(q, i);

                mlxsw_pci_eqe_owner_set(elem, 1);
        }

        mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
        mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
        mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

                mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
        }
        err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
        if (err)
                return err;
        mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
        mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        return 0;
}
static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
                              struct mlxsw_pci_queue *q)
{
        mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
        mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
        mlxsw_pci->cmd.comp.out_param =
                ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
                mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
        mlxsw_pci->cmd.wait_done = true;
        wake_up(&mlxsw_pci->cmd.wait);
}
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *elem;
        bool owner_bit;

        elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
        elem = elem_info->elem;
        owner_bit = mlxsw_pci_eqe_owner_get(elem);
        if (mlxsw_pci_elem_hw_owned(q, owner_bit))
                return NULL;
        q->consumer_counter++;
        rmb(); /* make sure we read owned bit before the rest of elem */
        return elem;
}
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
        u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
        unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
        char *eqe;
        u8 cqn;
        bool cq_handle = false;
        int items = 0;
        int credits = q->count >> 1;

        memset(&active_cqns, 0, sizeof(active_cqns));

        while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

                /* Command interface completion events are always received on
                 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
                 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
                 */
                switch (q->num) {
                case MLXSW_PCI_EQ_ASYNC_NUM:
                        mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
                        q->u.eq.ev_cmd_count++;
                        break;
                case MLXSW_PCI_EQ_COMP_NUM:
                        cqn = mlxsw_pci_eqe_cqn_get(eqe);
                        set_bit(cqn, active_cqns);
                        cq_handle = true;
                        q->u.eq.ev_comp_count++;
                        break;
                default:
                        q->u.eq.ev_other_count++;
                }
                if (++items == credits)
                        break;
        }
        if (items) {
                mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
                mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
        }

        if (!cq_handle)
                return;
        for_each_set_bit(cqn, active_cqns, cq_count) {
                q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
                mlxsw_pci_queue_tasklet_schedule(q);
        }
}
struct mlxsw_pci_queue_ops {
        const char *name;
        enum mlxsw_pci_queue_type type;
        void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
                         struct mlxsw_pci_queue *q);
        int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
                    struct mlxsw_pci_queue *q);
        void (*fini)(struct mlxsw_pci *mlxsw_pci,
                     struct mlxsw_pci_queue *q);
        void (*tasklet)(unsigned long data);
        u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
        u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
        u16 elem_count;
        u8 elem_size;
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_SDQ,
        .init           = mlxsw_pci_sdq_init,
        .fini           = mlxsw_pci_sdq_fini,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_RDQ,
        .init           = mlxsw_pci_rdq_init,
        .fini           = mlxsw_pci_rdq_fini,
        .elem_count     = MLXSW_PCI_WQE_COUNT,
        .elem_size      = MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_CQ,
        .pre_init       = mlxsw_pci_cq_pre_init,
        .init           = mlxsw_pci_cq_init,
        .fini           = mlxsw_pci_cq_fini,
        .tasklet        = mlxsw_pci_cq_tasklet,
        .elem_count_f   = mlxsw_pci_cq_elem_count,
        .elem_size_f    = mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
        .type           = MLXSW_PCI_QUEUE_TYPE_EQ,
        .init           = mlxsw_pci_eq_init,
        .fini           = mlxsw_pci_eq_fini,
        .tasklet        = mlxsw_pci_eq_tasklet,
        .elem_count     = MLXSW_PCI_EQE_COUNT,
        .elem_size      = MLXSW_PCI_EQE_SIZE
};
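/* Generic queue setup: allocate one DMA-coherent area of MLXSW_PCI_AQ_SIZE
 * bytes per queue, carve it into elements, and let the per-type init
 * callback register the pages with the device via the mailbox. CQ element
 * count and size depend on the negotiated CQE version, hence the _f
 * callbacks that override the static elem_count/elem_size values.
 */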
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                const struct mlxsw_pci_queue_ops *q_ops,
                                struct mlxsw_pci_queue *q, u8 q_num)
{
        struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
        int i;
        int err;

        q->num = q_num;
        if (q_ops->pre_init)
                q_ops->pre_init(mlxsw_pci, q);

        spin_lock_init(&q->lock);
        q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
                                         q_ops->elem_count;
        q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
                                            q_ops->elem_size;
        q->type = q_ops->type;
        q->pci = mlxsw_pci;

        if (q_ops->tasklet)
                tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

        mem_item->size = MLXSW_PCI_AQ_SIZE;
        mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
                                             mem_item->size,
                                             &mem_item->mapaddr);
        if (!mem_item->buf)
                return -ENOMEM;
        memset(mem_item->buf, 0, mem_item->size);

        q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
        if (!q->elem_info) {
                err = -ENOMEM;
                goto err_elem_info_alloc;
        }

        /* Initialize dma mapped elements info elem_info for
         * future easy access.
         */
        for (i = 0; i < q->count; i++) {
                struct mlxsw_pci_queue_elem_info *elem_info;

                elem_info = mlxsw_pci_queue_elem_info_get(q, i);
                elem_info->elem =
                        __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
        }

        mlxsw_cmd_mbox_zero(mbox);
        err = q_ops->init(mlxsw_pci, mbox, q);
        if (err)
                goto err_q_ops_init;
        return 0;

err_q_ops_init:
        kfree(q->elem_info);
err_elem_info_alloc:
        pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                            mem_item->buf, mem_item->mapaddr);
        return err;
}
static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
                                 const struct mlxsw_pci_queue_ops *q_ops,
                                 struct mlxsw_pci_queue *q)
{
        struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

        q_ops->fini(mlxsw_pci, q);
        kfree(q->elem_info);
        pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                            mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                      const struct mlxsw_pci_queue_ops *q_ops,
                                      u8 num_qs)
{
        struct mlxsw_pci_queue_type_group *queue_group;
        int i;
        int err;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
        queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
        if (!queue_group->q)
                return -ENOMEM;

        for (i = 0; i < num_qs; i++) {
                err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
                                           &queue_group->q[i], i);
                if (err)
                        goto err_queue_init;
        }
        queue_group->count = num_qs;

        return 0;

err_queue_init:
        for (i--; i >= 0; i--)
                mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
        kfree(queue_group->q);
        return err;
}
static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
                                       const struct mlxsw_pci_queue_ops *q_ops)
{
        struct mlxsw_pci_queue_type_group *queue_group;
        int i;

        queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
        for (i = 0; i < queue_group->count; i++)
                mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
        kfree(queue_group->q);
}
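/* Queue groups are brought up in dependency order: EQs first (CQs report
 * completions through an EQ), then CQs (descriptor queues report through
 * CQs), then SDQs and finally RDQs. The error path below unwinds in the
 * reverse order.
 */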
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        u8 num_sdqs;
        u8 sdq_log2sz;
        u8 num_rdqs;
        u8 rdq_log2sz;
        u8 num_cqs;
        u8 cq_log2sz;
        u8 cqv2_log2sz;
        u8 num_eqs;
        u8 eq_log2sz;
        int err;

        mlxsw_cmd_mbox_zero(mbox);
        err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
        if (err)
                return err;

        num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
        sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
        num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
        rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
        num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
        cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
        cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
        num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
        eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

        if (num_sdqs + num_rdqs > num_cqs ||
            num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
                dev_err(&pdev->dev, "Unsupported number of queues\n");
                return -EINVAL;
        }

        if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
            (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
            (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
            (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
             (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
            (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
                dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
                return -EINVAL;
        }

        mlxsw_pci->num_sdq_cqs = num_sdqs;

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
                                         num_eqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize event queues\n");
                return err;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
                                         num_cqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize completion queues\n");
                goto err_cqs_init;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
                                         num_sdqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
                goto err_sdqs_init;
        }

        err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
                                         num_rdqs);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
                goto err_rdqs_init;
        }

        /* We have to poll in command interface until queues are initialized */
        mlxsw_pci->cmd.nopoll = true;
        return 0;

err_rdqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
        return err;
}
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
        mlxsw_pci->cmd.nopoll = false;
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
        mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}
static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
                                     char *mbox, int index,
                                     const struct mlxsw_swid_config *swid)
{
        u8 mask = 0;

        if (swid->used_type) {
                mlxsw_cmd_mbox_config_profile_swid_config_type_set(
                        mbox, index, swid->type);
                mask |= 1;
        }
        if (swid->used_properties) {
                mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
                        mbox, index, swid->properties);
                mask |= 2;
        }
        mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
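/* The resource table is read in fixed-size chunks: each QUERY_RESOURCES
 * call returns up to MLXSW_CMD_QUERY_RESOURCES_PER_QUERY id/data pairs,
 * and iteration stops at the TABLE_END sentinel id.
 */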
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                     struct mlxsw_res *res)
{
        int index, i;
        u64 data;
        u16 id;
        int err;

        if (!res)
                return 0;

        mlxsw_cmd_mbox_zero(mbox);

        for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
             index++) {
                err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
                if (err)
                        return err;

                for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
                        id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
                        data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

                        if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
                                return 0;

                        mlxsw_res_parse(res, id, data);
                }
        }

        /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
         * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
         */
        return -EIO;
}
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
                                const struct mlxsw_config_profile *profile,
                                struct mlxsw_res *res)
{
        u64 single_size, double_size, linear_size;
        int err;

        err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
                                       &single_size, &double_size,
                                       &linear_size);
        if (err)
                return err;

        MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
        MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
        MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

        return 0;
}
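/* CONFIG_PROFILE fields come in pairs: a "set_" flag that marks the field
 * as valid and the field value itself. Only fields flagged as used in the
 * profile are written, so everything else keeps its firmware default.
 */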
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                    const struct mlxsw_config_profile *profile,
                                    struct mlxsw_res *res)
{
        int i;
        int err;

        mlxsw_cmd_mbox_zero(mbox);

        if (profile->used_max_vepa_channels) {
                mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
                        mbox, profile->max_vepa_channels);
        }
        if (profile->used_max_mid) {
                mlxsw_cmd_mbox_config_profile_set_max_mid_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_mid_set(
                        mbox, profile->max_mid);
        }
        if (profile->used_max_pgt) {
                mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_pgt_set(
                        mbox, profile->max_pgt);
        }
        if (profile->used_max_system_port) {
                mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_system_port_set(
                        mbox, profile->max_system_port);
        }
        if (profile->used_max_vlan_groups) {
                mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
                        mbox, profile->max_vlan_groups);
        }
        if (profile->used_max_regions) {
                mlxsw_cmd_mbox_config_profile_set_max_regions_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_regions_set(
                        mbox, profile->max_regions);
        }
        if (profile->used_flood_tables) {
                mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
                        mbox, profile->max_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
                        mbox, profile->max_vid_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
                        mbox, profile->max_fid_offset_flood_tables);
                mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
                        mbox, profile->fid_offset_flood_table_size);
                mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
                        mbox, profile->max_fid_flood_tables);
                mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
                        mbox, profile->fid_flood_table_size);
        }
        if (profile->used_flood_mode) {
                mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_flood_mode_set(
                        mbox, profile->flood_mode);
        }
        if (profile->used_max_ib_mc) {
                mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
                        mbox, profile->max_ib_mc);
        }
        if (profile->used_max_pkey) {
                mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_max_pkey_set(
                        mbox, profile->max_pkey);
        }
        if (profile->used_ar_sec) {
                mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_ar_sec_set(
                        mbox, profile->ar_sec);
        }
        if (profile->used_adaptive_routing_group_cap) {
                mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
                        mbox, 1);
                mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
                        mbox, profile->adaptive_routing_group_cap);
        }
        if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
                err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
                if (err)
                        return err;

                mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
                mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
                mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
                                                                            1);
                mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
                mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
                                                                mbox, 1);
                mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
                                        MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
        }

        for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
                mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
                                                     &profile->swid_config[i]);

        if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
                mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
                mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
        }

        return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
        struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
        int err;

        mlxsw_cmd_mbox_zero(mbox);
        err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
        if (err)
                return err;
        mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
        mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
        return 0;
}
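/* Firmware area pages are handed to the device in batches: physical
 * addresses accumulate in the mailbox and a MAP_FA command is issued
 * whenever MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries have been collected,
 * with a final MAP_FA flushing whatever remains.
 */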
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
                                  u16 num_pages)
{
        struct mlxsw_pci_mem_item *mem_item;
        int nent = 0;
        int i;
        int err;

        mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
                                           GFP_KERNEL);
        if (!mlxsw_pci->fw_area.items)
                return -ENOMEM;
        mlxsw_pci->fw_area.count = num_pages;

        mlxsw_cmd_mbox_zero(mbox);
        for (i = 0; i < num_pages; i++) {
                mem_item = &mlxsw_pci->fw_area.items[i];

                mem_item->size = MLXSW_PCI_PAGE_SIZE;
                mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
                                                     mem_item->size,
                                                     &mem_item->mapaddr);
                if (!mem_item->buf) {
                        err = -ENOMEM;
                        goto err_alloc;
                }
                mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
                mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
                if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
                        err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
                        if (err)
                                goto err_cmd_map_fa;
                        nent = 0;
                        mlxsw_cmd_mbox_zero(mbox);
                }
        }

        if (nent) {
                err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
                if (err)
                        goto err_cmd_map_fa;
        }

        return 0;

err_cmd_map_fa:
err_alloc:
        for (i--; i >= 0; i--) {
                mem_item = &mlxsw_pci->fw_area.items[i];

                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                                    mem_item->buf, mem_item->mapaddr);
        }
        kfree(mlxsw_pci->fw_area.items);
        return err;
}
static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
        struct mlxsw_pci_mem_item *mem_item;
        int i;

        mlxsw_cmd_unmap_fa(mlxsw_pci->core);

        for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
                mem_item = &mlxsw_pci->fw_area.items[i];

                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
                                    mem_item->buf, mem_item->mapaddr);
        }
        kfree(mlxsw_pci->fw_area.items);
}
static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
        struct mlxsw_pci *mlxsw_pci = dev_id;
        struct mlxsw_pci_queue *q;
        int i;

        for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
                q = mlxsw_pci_eq_get(mlxsw_pci, i);
                mlxsw_pci_queue_tasklet_schedule(q);
        }
        return IRQ_HANDLED;
}
static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
                                struct mlxsw_pci_mem_item *mbox)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;
        int err = 0;

        mbox->size = MLXSW_CMD_MBOX_SIZE;
        mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
                                         &mbox->mapaddr);
        if (!mbox->buf) {
                dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
                err = -ENOMEM;
        }

        return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
                                struct mlxsw_pci_mem_item *mbox)
{
        struct pci_dev *pdev = mlxsw_pci->pdev;

        pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
                            mbox->mapaddr);
}
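/* Software reset is requested via the MRSR register. SwitchX-2 provides no
 * readiness indication, so the driver simply sleeps for the full timeout;
 * newer devices expose a FW_READY register that is polled for the magic
 * ready value instead.
 */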
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
                              const struct pci_device_id *id)
{
        unsigned long end;
        char mrsr_pl[MLXSW_REG_MRSR_LEN];
        int err;

        mlxsw_reg_mrsr_pack(mrsr_pl);
        err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
        if (err)
                return err;
        if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
                msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
                return 0;
        }

        /* We must wait for the HW to become responsive once again. */
        msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
                u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

                if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
                        return 0;
                cond_resched();
        } while (time_before(jiffies, end));

        return -EBUSY;
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
        int err;

        err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
        if (err < 0)
                dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
        return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
        pci_free_irq_vectors(mlxsw_pci->pdev);
}
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
                          const struct mlxsw_config_profile *profile,
                          struct mlxsw_res *res)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct pci_dev *pdev = mlxsw_pci->pdev;
        char *mbox;
        u16 num_pages;
        int err;

        mutex_init(&mlxsw_pci->cmd.lock);
        init_waitqueue_head(&mlxsw_pci->cmd.wait);

        mlxsw_pci->core = mlxsw_core;

        mbox = mlxsw_cmd_mbox_alloc();
        if (!mbox)
                return -ENOMEM;

        err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
        if (err)
                goto mbox_put;

        err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
        if (err)
                goto err_out_mbox_alloc;

        err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
        if (err)
                goto err_sw_reset;

        err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
        if (err < 0) {
                dev_err(&pdev->dev, "MSI-X init failed\n");
                goto err_alloc_irq;
        }

        err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
        if (err)
                goto err_query_fw;

        mlxsw_pci->bus_info.fw_rev.major =
                mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
        mlxsw_pci->bus_info.fw_rev.minor =
                mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
        mlxsw_pci->bus_info.fw_rev.subminor =
                mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

        if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
                dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
                err = -EINVAL;
                goto err_iface_rev;
        }
        if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
                dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
                err = -EINVAL;
                goto err_doorbell_page_bar;
        }

        mlxsw_pci->doorbell_offset =
                mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

        num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
        err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
        if (err)
                goto err_fw_area_init;

        err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
        if (err)
                goto err_boardinfo;

        err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
        if (err)
                goto err_query_resources;

        if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
            MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
                mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
        else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
                 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
                mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
        else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
                  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
                 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
                mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
        } else {
                dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
                err = -EINVAL;
                goto err_cqe_v_check;
        }

        err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
        if (err)
                goto err_config_profile;

        err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
        if (err)
                goto err_aqs_init;

        err = request_irq(pci_irq_vector(pdev, 0),
                          mlxsw_pci_eq_irq_handler, 0,
                          mlxsw_pci->bus_info.device_kind, mlxsw_pci);
        if (err) {
                dev_err(&pdev->dev, "IRQ request failed\n");
                goto err_request_eq_irq;
        }

        goto mbox_put;

err_request_eq_irq:
        mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
        mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
        mlxsw_cmd_mbox_free(mbox);
        return err;
}
static void mlxsw_pci_fini(void *bus_priv)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;

        free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
        mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
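/* Transmit queue selection simply maps the local port onto the available
 * SDQs by modulo, so traffic from a given port always lands on the same
 * send queue.
 */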
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
                   const struct mlxsw_tx_info *tx_info)
{
        u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

        return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
                                        const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

        return !mlxsw_pci_queue_elem_info_producer_get(q);
}
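/* A WQE carries MLXSW_PCI_WQE_SG_ENTRIES scatter/gather entries: entry 0 is
 * used for the linear part of the skb and the remaining ones for page
 * fragments. skbs with more fragments than that are linearized first.
 */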
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
                                  const struct mlxsw_tx_info *tx_info)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        struct mlxsw_pci_queue *q;
        struct mlxsw_pci_queue_elem_info *elem_info;
        char *wqe;
        int i;
        int err;

        if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
                err = skb_linearize(skb);
                if (err)
                        return err;
        }

        q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
        spin_lock_bh(&q->lock);
        elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
        if (!elem_info) {
                /* queue is full */
                err = -EAGAIN;
                goto unlock;
        }
        elem_info->u.sdq.skb = skb;

        wqe = elem_info->elem;
        mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
        mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
        mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

        err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
        if (err)
                goto unlock;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
                                             skb_frag_address(frag),
                                             skb_frag_size(frag),
                                             DMA_TO_DEVICE);
                if (err)
                        goto unmap_frags;
        }

        /* Set unused sq entries byte count to zero. */
        for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
                mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

        /* Everything is set up, ring producer doorbell to get HW going */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

        goto unlock;

unmap_frags:
        for (; i >= 0; i--)
                mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
        spin_unlock_bh(&q->lock);
        return err;
}
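/* Command interface execution: the mailbox DMA addresses and the input
 * modifier are programmed into the CIR registers, then the GO bit starts
 * the command. While cmd.nopoll is false (before the EQs are up),
 * completion is detected by polling the GO bit; afterwards the async EQ
 * wakes the waiter and the status comes from the event (see
 * mlxsw_pci_eq_cmd_event()).
 */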
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
                              u32 in_mod, bool out_mbox_direct,
                              char *in_mbox, size_t in_mbox_size,
                              char *out_mbox, size_t out_mbox_size,
                              u8 *p_status)
{
        struct mlxsw_pci *mlxsw_pci = bus_priv;
        dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
        bool evreq = mlxsw_pci->cmd.nopoll;
        unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
        bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
        int err;

        *p_status = MLXSW_CMD_STATUS_OK;

        err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
        if (err)
                return err;

        if (in_mbox) {
                memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
                in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
        }
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

        if (out_mbox)
                out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
        mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
        mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

        mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
        mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

        *p_wait_done = false;

        wmb(); /* all needs to be written before we write control register */
        mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
                          MLXSW_PCI_CIR_CTRL_GO_BIT |
                          (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
                          (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
                          opcode);

        if (!evreq) {
                unsigned long end;

                end = jiffies + timeout;
                do {
                        u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

                        if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
                                *p_wait_done = true;
                                *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, end));
        } else {
                wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
                *p_status = mlxsw_pci->cmd.comp.status;
        }

        err = 0;
        if (*p_wait_done) {
                if (*p_status)
                        err = -EIO;
        } else {
                err = -ETIMEDOUT;
        }

        if (!err && out_mbox && out_mbox_direct) {
                /* Some commands don't use output param as address to mailbox
                 * but they store output directly into registers. In that case,
                 * copy registers into mbox buffer.
                 */
                __be32 tmp;

                if (!evreq) {
                        tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
                                                           CIR_OUT_PARAM_HI));
                        memcpy(out_mbox, &tmp, sizeof(tmp));
                        tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
                                                           CIR_OUT_PARAM_LO));
                        memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
                }
        } else if (!err && out_mbox) {
                memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
        }

        mutex_unlock(&mlxsw_pci->cmd.lock);

        return err;
}
static const struct mlxsw_bus mlxsw_pci_bus = {
        .kind                   = "pci",
        .init                   = mlxsw_pci_init,
        .fini                   = mlxsw_pci_fini,
        .skb_transmit_busy      = mlxsw_pci_skb_transmit_busy,
        .skb_transmit           = mlxsw_pci_skb_transmit,
        .cmd_exec               = mlxsw_pci_cmd_exec,
        .features               = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const char *driver_name = pdev->driver->name;
        struct mlxsw_pci *mlxsw_pci;
        int err;

        mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
        if (!mlxsw_pci)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_device failed\n");
                goto err_pci_enable_device;
        }

        err = pci_request_regions(pdev, driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto err_pci_request_regions;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        }

        if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
                dev_err(&pdev->dev, "invalid PCI region size\n");
                err = -EINVAL;
                goto err_pci_resource_len_check;
        }

        mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
                                     pci_resource_len(pdev, 0));
        if (!mlxsw_pci->hw_addr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                err = -EIO;
                goto err_ioremap;
        }
        pci_set_master(pdev);

        mlxsw_pci->pdev = pdev;
        pci_set_drvdata(pdev, mlxsw_pci);

        mlxsw_pci->bus_info.device_kind = driver_name;
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;
        mlxsw_pci->id = id;

        err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
                                             &mlxsw_pci_bus, mlxsw_pci, false,
                                             NULL);
        if (err) {
                dev_err(&pdev->dev, "cannot register bus device\n");
                goto err_bus_device_register;
        }

        return 0;

err_bus_device_register:
        iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
        pci_release_regions(pdev);
err_pci_request_regions:
        pci_disable_device(pdev);
err_pci_enable_device:
        kfree(mlxsw_pci);
        return err;
}
static void mlxsw_pci_remove(struct pci_dev *pdev)
{
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

        mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
        kfree(mlxsw_pci);
}
int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
        pci_driver->probe = mlxsw_pci_probe;
        pci_driver->remove = mlxsw_pci_remove;
        return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
        pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
static int __init mlxsw_pci_module_init(void)
{
        return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");