2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 /*****************************************************************************/
36 /*****************************************************************************/
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
44 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46 | (ENA_COMMON_SPEC_VERSION_MINOR))
48 #define ENA_CTRL_MAJOR 0
49 #define ENA_CTRL_MINOR 0
50 #define ENA_CTRL_SUB_MINOR 1
52 #define MIN_ENA_CTRL_VER \
53 (((ENA_CTRL_MAJOR) << \
54 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
55 ((ENA_CTRL_MINOR) << \
56 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
59 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
60 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
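/* Note (explanatory addition): these helpers split a 64-bit DMA address into
 * the two 32-bit halves the device registers expect, e.g. when programming a
 * queue base address as done below for the AENQ:
 *
 *	addr_low  = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 *	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 *	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 *	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 */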
62 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
64 #define ENA_REGS_ADMIN_INTR_MASK 1
68 /*****************************************************************************/
69 /*****************************************************************************/
70 /*****************************************************************************/
75 /* Abort - canceled by the driver */
80 struct completion wait_event;
81 struct ena_admin_acq_entry *user_cqe;
83 enum ena_cmd_status status;
84 /* status from the device */
90 struct ena_com_stats_ctx {
91 struct ena_admin_aq_get_stats_cmd get_cmd;
92 struct ena_admin_acq_get_stats_resp get_resp;
95 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
96 struct ena_common_mem_addr *ena_addr,
99 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
100 pr_err("dma address has more bits that the device supports\n");
104 ena_addr->mem_addr_low = lower_32_bits(addr);
105 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
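/* Usage sketch (illustrative, mirrors the get/set-feature callers later in
 * this file): commands that carry a control buffer program its DMA address
 * through ena_com_mem_addr_set() and abort if the address does not fit the
 * DMA width the device advertised:
 *
 *	ret = ena_com_mem_addr_set(ena_dev,
 *				   &get_cmd.control_buffer.address,
 *				   control_buf_dma_addr);
 *	if (unlikely(ret)) {
 *		pr_err("memory address set failed\n");
 *		return ret;
 *	}
 */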
110 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
112 struct ena_com_admin_sq *sq = &queue->sq;
113 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
115 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
119 pr_err("memory allocation failed");
132 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
134 struct ena_com_admin_cq *cq = &queue->cq;
135 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
137 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
141 pr_err("memory allocation failed");
151 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
152 struct ena_aenq_handlers *aenq_handlers)
154 struct ena_com_aenq *aenq = &dev->aenq;
155 u32 addr_low, addr_high, aenq_caps;
158 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
159 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
160 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
163 if (!aenq->entries) {
164 pr_err("memory allocation failed");
168 aenq->head = aenq->q_depth;
171 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
172 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
174 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
175 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
178 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
179 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
180 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
181 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
182 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
184 if (unlikely(!aenq_handlers)) {
185 pr_err("aenq handlers pointer is NULL\n");
189 aenq->aenq_handlers = aenq_handlers;
194 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
195 struct ena_comp_ctx *comp_ctx)
197 comp_ctx->occupied = false;
198 atomic_dec(&queue->outstanding_cmds);
201 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
202 u16 command_id, bool capture)
204 if (unlikely(command_id >= queue->q_depth)) {
205 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
206 command_id, queue->q_depth);
210 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
211 pr_err("Completion context is occupied\n");
216 atomic_inc(&queue->outstanding_cmds);
217 queue->comp_ctx[command_id].occupied = true;
220 return &queue->comp_ctx[command_id];
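/* Completion-context lifecycle (explanatory sketch based on the flow in this
 * file): a context is captured when a command is submitted and released only
 * after its completion has been consumed, so outstanding_cmds reflects the
 * number of in-flight admin commands:
 *
 *	comp_ctx = get_comp_ctxt(queue, cmd_id, true);	// occupied, outstanding++
 *	// ... device writes the ACQ entry, status/comp_status get filled ...
 *	comp_ctxt_release(queue, comp_ctx);		// released, outstanding--
 */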
223 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
224 struct ena_admin_aq_entry *cmd,
225 size_t cmd_size_in_bytes,
226 struct ena_admin_acq_entry *comp,
227 size_t comp_size_in_bytes)
229 struct ena_comp_ctx *comp_ctx;
230 u16 tail_masked, cmd_id;
234 queue_size_mask = admin_queue->q_depth - 1;
236 tail_masked = admin_queue->sq.tail & queue_size_mask;
238 /* In case of queue FULL */
239 cnt = atomic_read(&admin_queue->outstanding_cmds);
240 if (cnt >= admin_queue->q_depth) {
241 pr_debug("admin queue is full.\n");
242 admin_queue->stats.out_of_space++;
243 return ERR_PTR(-ENOSPC);
246 cmd_id = admin_queue->curr_cmd_id;
248 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
249 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
251 cmd->aq_common_descriptor.command_id |= cmd_id &
252 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
254 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
255 if (unlikely(!comp_ctx))
256 return ERR_PTR(-EINVAL);
258 comp_ctx->status = ENA_CMD_SUBMITTED;
259 comp_ctx->comp_size = (u32)comp_size_in_bytes;
260 comp_ctx->user_cqe = comp;
261 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
263 reinit_completion(&comp_ctx->wait_event);
265 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
267 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
270 admin_queue->sq.tail++;
271 admin_queue->stats.submitted_cmd++;
273 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
274 admin_queue->sq.phase = !admin_queue->sq.phase;
276 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
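/* Submission-side phase protocol (explanatory note): each descriptor is
 * stamped with the current sq.phase bit, the tail is advanced, and the phase
 * flips whenever the tail wraps. For example, with q_depth == 32 and an
 * initial phase of 1:
 *
 *	submissions  0..31 -> written with phase 1
 *	submissions 32..63 -> written with phase 0
 *
 * so stale entries from the previous lap can be told apart from fresh ones
 * without a separate valid bit; the doorbell write above then notifies the
 * device of the new tail.
 */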
281 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
283 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
284 struct ena_comp_ctx *comp_ctx;
287 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
288 if (unlikely(!queue->comp_ctx)) {
289 pr_err("memory allocation failed");
293 for (i = 0; i < queue->q_depth; i++) {
294 comp_ctx = get_comp_ctxt(queue, i, false);
296 init_completion(&comp_ctx->wait_event);
302 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
303 struct ena_admin_aq_entry *cmd,
304 size_t cmd_size_in_bytes,
305 struct ena_admin_acq_entry *comp,
306 size_t comp_size_in_bytes)
309 struct ena_comp_ctx *comp_ctx;
311 spin_lock_irqsave(&admin_queue->q_lock, flags);
312 if (unlikely(!admin_queue->running_state)) {
313 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
314 return ERR_PTR(-ENODEV);
316 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
320 if (IS_ERR(comp_ctx))
321 admin_queue->running_state = false;
322 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
327 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
328 struct ena_com_create_io_ctx *ctx,
329 struct ena_com_io_sq *io_sq)
334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
336 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) :
339 sizeof(struct ena_eth_io_rx_desc);
341 size = io_sq->desc_entry_size * io_sq->q_depth;
343 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr =
347 dma_zalloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr,
350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr =
353 dma_zalloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr,
358 dev_node = dev_to_node(ena_dev->dmadev);
359 set_dev_node(ena_dev->dmadev, ctx->numa_node);
360 io_sq->desc_addr.virt_addr =
361 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
362 set_dev_node(ena_dev->dmadev, dev_node);
363 if (!io_sq->desc_addr.virt_addr) {
364 io_sq->desc_addr.virt_addr =
365 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
369 if (!io_sq->desc_addr.virt_addr) {
370 pr_err("memory allocation failed");
375 io_sq->next_to_comp = 0;
381 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
382 struct ena_com_create_io_ctx *ctx,
383 struct ena_com_io_cq *io_cq)
388 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
390 /* Use the basic completion descriptor for Rx */
391 io_cq->cdesc_entry_size_in_bytes =
392 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
393 sizeof(struct ena_eth_io_tx_cdesc) :
394 sizeof(struct ena_eth_io_rx_cdesc_base);
396 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
398 prev_node = dev_to_node(ena_dev->dmadev);
399 set_dev_node(ena_dev->dmadev, ctx->numa_node);
400 io_cq->cdesc_addr.virt_addr =
401 dma_zalloc_coherent(ena_dev->dmadev, size,
402 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
403 set_dev_node(ena_dev->dmadev, prev_node);
404 if (!io_cq->cdesc_addr.virt_addr) {
405 io_cq->cdesc_addr.virt_addr =
406 dma_zalloc_coherent(ena_dev->dmadev, size,
407 &io_cq->cdesc_addr.phys_addr,
411 if (!io_cq->cdesc_addr.virt_addr) {
412 pr_err("memory allocation failed");
422 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
423 struct ena_admin_acq_entry *cqe)
425 struct ena_comp_ctx *comp_ctx;
428 cmd_id = cqe->acq_common_descriptor.command &
429 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
431 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
432 if (unlikely(!comp_ctx)) {
433 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
434 admin_queue->running_state = false;
438 comp_ctx->status = ENA_CMD_COMPLETED;
439 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
441 if (comp_ctx->user_cqe)
442 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
444 if (!admin_queue->polling)
445 complete(&comp_ctx->wait_event);
448 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
450 struct ena_admin_acq_entry *cqe = NULL;
455 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
456 phase = admin_queue->cq.phase;
458 cqe = &admin_queue->cq.entries[head_masked];
460 /* Go over all the completions */
461 while ((cqe->acq_common_descriptor.flags &
462 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
463 		/* Do not read the rest of the completion entry before the
464 		 * phase bit has been validated
467 ena_com_handle_single_admin_completion(admin_queue, cqe);
471 if (unlikely(head_masked == admin_queue->q_depth)) {
476 cqe = &admin_queue->cq.entries[head_masked];
479 admin_queue->cq.head += comp_num;
480 admin_queue->cq.phase = phase;
481 admin_queue->sq.head += comp_num;
482 admin_queue->stats.completed_cmd += comp_num;
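/* Reaping sketch (explanatory, numbers hypothetical): with q_depth == 32 and
 * cq.head == 30, three back-to-back completions are consumed at indices 30,
 * 31 and 0; when head_masked wraps past the end of the ring the expected
 * phase flips, so entries left over from the previous lap are not re-read.
 * cq.head and sq.head then both advance by comp_num, freeing SQ space for
 * new submissions.
 */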
485 static int ena_com_comp_status_to_errno(u8 comp_status)
487 if (unlikely(comp_status != 0))
488 pr_err("admin command failed[%u]\n", comp_status);
490 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
493 switch (comp_status) {
494 case ENA_ADMIN_SUCCESS:
496 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
498 case ENA_ADMIN_UNSUPPORTED_OPCODE:
500 case ENA_ADMIN_BAD_OPCODE:
501 case ENA_ADMIN_MALFORMED_REQUEST:
502 case ENA_ADMIN_ILLEGAL_PARAMETER:
503 case ENA_ADMIN_UNKNOWN_ERROR:
510 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
511 struct ena_com_admin_queue *admin_queue)
513 unsigned long flags, timeout;
516 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
519 spin_lock_irqsave(&admin_queue->q_lock, flags);
520 ena_com_handle_admin_completion(admin_queue);
521 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
523 if (comp_ctx->status != ENA_CMD_SUBMITTED)
526 if (time_is_before_jiffies(timeout)) {
527 pr_err("Wait for completion (polling) timeout\n");
528 /* ENA didn't have any completion */
529 spin_lock_irqsave(&admin_queue->q_lock, flags);
530 admin_queue->stats.no_completion++;
531 admin_queue->running_state = false;
532 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
541 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
542 pr_err("Command was aborted\n");
543 spin_lock_irqsave(&admin_queue->q_lock, flags);
544 admin_queue->stats.aborted_cmd++;
545 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
550 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
553 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
555 comp_ctxt_release(admin_queue, comp_ctx);
559 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
560 struct ena_com_admin_queue *admin_queue)
565 wait_for_completion_timeout(&comp_ctx->wait_event,
567 admin_queue->completion_timeout));
569 	/* In case the command wasn't completed, find out the root cause.
570 	 * There might be 2 kinds of errors:
571 	 * 1) No completion (timeout reached)
572 	 * 2) There is a completion, but the driver didn't receive any MSI-X interrupt.
574 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
575 spin_lock_irqsave(&admin_queue->q_lock, flags);
576 ena_com_handle_admin_completion(admin_queue);
577 admin_queue->stats.no_completion++;
578 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
580 if (comp_ctx->status == ENA_CMD_COMPLETED)
581 pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
582 comp_ctx->cmd_opcode);
584 pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
585 comp_ctx->cmd_opcode, comp_ctx->status);
587 admin_queue->running_state = false;
592 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
594 comp_ctxt_release(admin_queue, comp_ctx);
598 /* This method reads the hardware device register through posting writes
599  * and waiting for the response.
600 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
602 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
604 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
605 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
606 mmio_read->read_resp;
607 u32 mmio_read_reg, ret, i;
609 u32 timeout = mmio_read->reg_read_to;
614 timeout = ENA_REG_READ_TIMEOUT;
616 /* If readless is disabled, perform regular read */
617 if (!mmio_read->readless_supported)
618 return readl(ena_dev->reg_bar + offset);
620 spin_lock_irqsave(&mmio_read->lock, flags);
621 mmio_read->seq_num++;
623 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
624 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
625 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
626 mmio_read_reg |= mmio_read->seq_num &
627 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
629 	/* make sure read_resp->req_id gets updated before the hw can write
634 writel_relaxed(mmio_read_reg,
635 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
638 for (i = 0; i < timeout; i++) {
639 if (read_resp->req_id == mmio_read->seq_num)
645 if (unlikely(i == timeout)) {
646 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
647 mmio_read->seq_num, offset, read_resp->req_id,
649 ret = ENA_MMIO_READ_TIMEOUT;
653 if (read_resp->reg_off != offset) {
654 pr_err("Read failure: wrong offset provided");
655 ret = ENA_MMIO_READ_TIMEOUT;
657 ret = read_resp->reg_val;
660 spin_unlock_irqrestore(&mmio_read->lock, flags);
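/* Caller-side sketch (illustrative; mirrors ena_com_get_dma_width() and
 * ena_com_dev_reset() below, the errno is an example): readless reads can
 * time out, so every caller must check for the sentinel value before using
 * the result:
 *
 *	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 *
 *	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
 *		pr_err("Reg read timeout occurred\n");
 *		return -ETIME;
 *	}
 */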
665 /* There are two ways to wait for completion.
666  * Polling mode - wait until the completion is available.
667  * Async mode - wait on wait queue until the completion is ready
668  * (or the timeout expired).
669  * It is expected that the IRQ handler calls ena_com_handle_admin_completion
670  * to mark the completions.
672 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
673 struct ena_com_admin_queue *admin_queue)
675 if (admin_queue->polling)
676 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
679 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
683 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
684 struct ena_com_io_sq *io_sq)
686 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
687 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
688 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
692 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
694 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
695 direction = ENA_ADMIN_SQ_DIRECTION_TX;
697 direction = ENA_ADMIN_SQ_DIRECTION_RX;
699 destroy_cmd.sq.sq_identity |= (direction <<
700 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
701 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
703 destroy_cmd.sq.sq_idx = io_sq->idx;
704 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
706 ret = ena_com_execute_admin_command(admin_queue,
707 (struct ena_admin_aq_entry *)&destroy_cmd,
709 (struct ena_admin_acq_entry *)&destroy_resp,
710 sizeof(destroy_resp));
712 if (unlikely(ret && (ret != -ENODEV)))
713 pr_err("failed to destroy io sq error: %d\n", ret);
718 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
719 struct ena_com_io_sq *io_sq,
720 struct ena_com_io_cq *io_cq)
724 if (io_cq->cdesc_addr.virt_addr) {
725 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
727 dma_free_coherent(ena_dev->dmadev, size,
728 io_cq->cdesc_addr.virt_addr,
729 io_cq->cdesc_addr.phys_addr);
731 io_cq->cdesc_addr.virt_addr = NULL;
734 if (io_sq->desc_addr.virt_addr) {
735 size = io_sq->desc_entry_size * io_sq->q_depth;
737 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
738 dma_free_coherent(ena_dev->dmadev, size,
739 io_sq->desc_addr.virt_addr,
740 io_sq->desc_addr.phys_addr);
742 devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
744 io_sq->desc_addr.virt_addr = NULL;
748 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
753 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
754 timeout = (timeout * 100) / ENA_POLL_MS;
756 for (i = 0; i < timeout; i++) {
757 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
759 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
760 pr_err("Reg read timeout occurred\n");
764 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
774 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
775 enum ena_admin_aq_feature_id feature_id)
777 u32 feature_mask = 1 << feature_id;
779 	/* Device attributes are always supported */
780 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
781 !(ena_dev->supported_features & feature_mask))
787 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
788 struct ena_admin_get_feat_resp *get_resp,
789 enum ena_admin_aq_feature_id feature_id,
790 dma_addr_t control_buf_dma_addr,
791 u32 control_buff_size)
793 struct ena_com_admin_queue *admin_queue;
794 struct ena_admin_get_feat_cmd get_cmd;
797 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
798 pr_debug("Feature %d isn't supported\n", feature_id);
802 memset(&get_cmd, 0x0, sizeof(get_cmd));
803 admin_queue = &ena_dev->admin_queue;
805 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
807 if (control_buff_size)
808 get_cmd.aq_common_descriptor.flags =
809 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
811 get_cmd.aq_common_descriptor.flags = 0;
813 ret = ena_com_mem_addr_set(ena_dev,
814 &get_cmd.control_buffer.address,
815 control_buf_dma_addr);
817 pr_err("memory address set failed\n");
821 get_cmd.control_buffer.length = control_buff_size;
823 get_cmd.feat_common.feature_id = feature_id;
825 ret = ena_com_execute_admin_command(admin_queue,
826 (struct ena_admin_aq_entry *)
829 (struct ena_admin_acq_entry *)
834 pr_err("Failed to submit get_feature command %d error: %d\n",
840 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
841 struct ena_admin_get_feat_resp *get_resp,
842 enum ena_admin_aq_feature_id feature_id)
844 return ena_com_get_feature_ex(ena_dev,
851 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
853 struct ena_rss *rss = &ena_dev->rss;
856 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
857 &rss->hash_key_dma_addr, GFP_KERNEL);
859 if (unlikely(!rss->hash_key))
865 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
867 struct ena_rss *rss = &ena_dev->rss;
870 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
871 rss->hash_key, rss->hash_key_dma_addr);
872 rss->hash_key = NULL;
875 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
877 struct ena_rss *rss = &ena_dev->rss;
880 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
881 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
883 if (unlikely(!rss->hash_ctrl))
889 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
891 struct ena_rss *rss = &ena_dev->rss;
894 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
895 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
896 rss->hash_ctrl = NULL;
899 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
902 struct ena_rss *rss = &ena_dev->rss;
903 struct ena_admin_get_feat_resp get_resp;
907 ret = ena_com_get_feature(ena_dev, &get_resp,
908 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
912 if ((get_resp.u.ind_table.min_size > log_size) ||
913 (get_resp.u.ind_table.max_size < log_size)) {
914 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
915 1 << log_size, 1 << get_resp.u.ind_table.min_size,
916 1 << get_resp.u.ind_table.max_size);
920 tbl_size = (1ULL << log_size) *
921 sizeof(struct ena_admin_rss_ind_table_entry);
924 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
925 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
926 if (unlikely(!rss->rss_ind_tbl))
929 tbl_size = (1ULL << log_size) * sizeof(u16);
930 rss->host_rss_ind_tbl =
931 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
932 if (unlikely(!rss->host_rss_ind_tbl))
935 rss->tbl_log_size = log_size;
940 tbl_size = (1ULL << log_size) *
941 sizeof(struct ena_admin_rss_ind_table_entry);
943 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
944 rss->rss_ind_tbl_dma_addr);
945 rss->rss_ind_tbl = NULL;
947 rss->tbl_log_size = 0;
951 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
953 struct ena_rss *rss = &ena_dev->rss;
954 size_t tbl_size = (1ULL << rss->tbl_log_size) *
955 sizeof(struct ena_admin_rss_ind_table_entry);
957 if (rss->rss_ind_tbl)
958 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
959 rss->rss_ind_tbl_dma_addr);
960 rss->rss_ind_tbl = NULL;
962 if (rss->host_rss_ind_tbl)
963 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
964 rss->host_rss_ind_tbl = NULL;
967 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
968 struct ena_com_io_sq *io_sq, u16 cq_idx)
970 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
971 struct ena_admin_aq_create_sq_cmd create_cmd;
972 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
976 memset(&create_cmd, 0x0, sizeof(create_cmd));
978 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
980 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
981 direction = ENA_ADMIN_SQ_DIRECTION_TX;
983 direction = ENA_ADMIN_SQ_DIRECTION_RX;
985 create_cmd.sq_identity |= (direction <<
986 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
987 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
989 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
990 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
992 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
993 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
994 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
996 create_cmd.sq_caps_3 |=
997 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
999 create_cmd.cq_idx = cq_idx;
1000 create_cmd.sq_depth = io_sq->q_depth;
1002 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1003 ret = ena_com_mem_addr_set(ena_dev,
1005 io_sq->desc_addr.phys_addr);
1006 if (unlikely(ret)) {
1007 pr_err("memory address set failed\n");
1012 ret = ena_com_execute_admin_command(admin_queue,
1013 (struct ena_admin_aq_entry *)&create_cmd,
1015 (struct ena_admin_acq_entry *)&cmd_completion,
1016 sizeof(cmd_completion));
1017 if (unlikely(ret)) {
1018 pr_err("Failed to create IO SQ. error: %d\n", ret);
1022 io_sq->idx = cmd_completion.sq_idx;
1024 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1025 (uintptr_t)cmd_completion.sq_doorbell_offset);
1027 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1028 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1029 + cmd_completion.llq_headers_offset);
1031 io_sq->desc_addr.pbuf_dev_addr =
1032 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1033 cmd_completion.llq_descriptors_offset);
1036 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1041 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1043 struct ena_rss *rss = &ena_dev->rss;
1044 struct ena_com_io_sq *io_sq;
1048 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1049 qid = rss->host_rss_ind_tbl[i];
1050 if (qid >= ENA_TOTAL_NUM_QUEUES)
1053 io_sq = &ena_dev->io_sq_queues[qid];
1055 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1058 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1064 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1066 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1067 struct ena_rss *rss = &ena_dev->rss;
1071 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1072 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1074 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1075 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1077 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1079 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1082 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1088 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1092 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1094 ena_dev->intr_moder_tbl =
1095 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1096 if (!ena_dev->intr_moder_tbl)
1099 ena_com_config_default_interrupt_moderation_table(ena_dev);
1104 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1105 u16 intr_delay_resolution)
1107 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1110 if (!intr_delay_resolution) {
1111 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1112 intr_delay_resolution = 1;
1114 ena_dev->intr_delay_resolution = intr_delay_resolution;
1117 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1118 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1121 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
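/* Worked example (hypothetical numbers): the moderation table is kept in
 * device units, so if the device reports intr_delay_resolution == 4 and an
 * entry was configured as 64 usec, it is stored as 64 / 4 == 16. A reported
 * resolution of 0 falls back to the default 1 usec, so the division above is
 * always well defined.
 */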
1124 /*****************************************************************************/
1125 /******************************* API ******************************/
1126 /*****************************************************************************/
1128 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1129 struct ena_admin_aq_entry *cmd,
1131 struct ena_admin_acq_entry *comp,
1134 struct ena_comp_ctx *comp_ctx;
1137 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1139 if (IS_ERR(comp_ctx)) {
1140 if (comp_ctx == ERR_PTR(-ENODEV))
1141 pr_debug("Failed to submit command [%ld]\n",
1144 pr_err("Failed to submit command [%ld]\n",
1147 return PTR_ERR(comp_ctx);
1150 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1151 if (unlikely(ret)) {
1152 if (admin_queue->running_state)
1153 pr_err("Failed to process command. ret = %d\n", ret);
1155 pr_debug("Failed to process command. ret = %d\n", ret);
1160 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1161 struct ena_com_io_cq *io_cq)
1163 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1164 struct ena_admin_aq_create_cq_cmd create_cmd;
1165 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1168 memset(&create_cmd, 0x0, sizeof(create_cmd));
1170 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1172 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1173 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1174 create_cmd.cq_caps_1 |=
1175 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1177 create_cmd.msix_vector = io_cq->msix_vector;
1178 create_cmd.cq_depth = io_cq->q_depth;
1180 ret = ena_com_mem_addr_set(ena_dev,
1182 io_cq->cdesc_addr.phys_addr);
1183 if (unlikely(ret)) {
1184 pr_err("memory address set failed\n");
1188 ret = ena_com_execute_admin_command(admin_queue,
1189 (struct ena_admin_aq_entry *)&create_cmd,
1191 (struct ena_admin_acq_entry *)&cmd_completion,
1192 sizeof(cmd_completion));
1193 if (unlikely(ret)) {
1194 pr_err("Failed to create IO CQ. error: %d\n", ret);
1198 io_cq->idx = cmd_completion.cq_idx;
1200 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1201 cmd_completion.cq_interrupt_unmask_register_offset);
1203 if (cmd_completion.cq_head_db_register_offset)
1204 io_cq->cq_head_db_reg =
1205 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1206 cmd_completion.cq_head_db_register_offset);
1208 if (cmd_completion.numa_node_register_offset)
1209 io_cq->numa_node_cfg_reg =
1210 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1211 cmd_completion.numa_node_register_offset);
1213 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1218 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1219 struct ena_com_io_sq **io_sq,
1220 struct ena_com_io_cq **io_cq)
1222 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1223 pr_err("Invalid queue number %d but the max is %d\n", qid,
1224 ENA_TOTAL_NUM_QUEUES);
1228 *io_sq = &ena_dev->io_sq_queues[qid];
1229 *io_cq = &ena_dev->io_cq_queues[qid];
1234 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1236 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1237 struct ena_comp_ctx *comp_ctx;
1240 if (!admin_queue->comp_ctx)
1243 for (i = 0; i < admin_queue->q_depth; i++) {
1244 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1245 if (unlikely(!comp_ctx))
1248 comp_ctx->status = ENA_CMD_ABORTED;
1250 complete(&comp_ctx->wait_event);
1254 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1256 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1257 unsigned long flags;
1259 spin_lock_irqsave(&admin_queue->q_lock, flags);
1260 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1261 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1262 msleep(ENA_POLL_MS);
1263 spin_lock_irqsave(&admin_queue->q_lock, flags);
1265 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1268 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1269 struct ena_com_io_cq *io_cq)
1271 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1272 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1273 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1276 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1278 destroy_cmd.cq_idx = io_cq->idx;
1279 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1281 ret = ena_com_execute_admin_command(admin_queue,
1282 (struct ena_admin_aq_entry *)&destroy_cmd,
1283 sizeof(destroy_cmd),
1284 (struct ena_admin_acq_entry *)&destroy_resp,
1285 sizeof(destroy_resp));
1287 if (unlikely(ret && (ret != -ENODEV)))
1288 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1293 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1295 return ena_dev->admin_queue.running_state;
1298 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1300 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1301 unsigned long flags;
1303 spin_lock_irqsave(&admin_queue->q_lock, flags);
1304 ena_dev->admin_queue.running_state = state;
1305 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1308 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1310 u16 depth = ena_dev->aenq.q_depth;
1312 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1314 /* Init head_db to mark that all entries in the queue
1315 * are initially available
1317 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1320 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1322 struct ena_com_admin_queue *admin_queue;
1323 struct ena_admin_set_feat_cmd cmd;
1324 struct ena_admin_set_feat_resp resp;
1325 struct ena_admin_get_feat_resp get_resp;
1328 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1330 pr_info("Can't get aenq configuration\n");
1334 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1335 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1336 get_resp.u.aenq.supported_groups, groups_flag);
1340 memset(&cmd, 0x0, sizeof(cmd));
1341 admin_queue = &ena_dev->admin_queue;
1343 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1344 cmd.aq_common_descriptor.flags = 0;
1345 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1346 cmd.u.aenq.enabled_groups = groups_flag;
1348 ret = ena_com_execute_admin_command(admin_queue,
1349 (struct ena_admin_aq_entry *)&cmd,
1351 (struct ena_admin_acq_entry *)&resp,
1355 pr_err("Failed to config AENQ ret: %d\n", ret);
1360 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1362 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1365 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1366 pr_err("Reg read timeout occurred\n");
1370 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1371 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1373 pr_debug("ENA dma width: %d\n", width);
1375 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1376 pr_err("DMA width illegal value: %d\n", width);
1380 ena_dev->dma_addr_bits = width;
1385 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1389 u32 ctrl_ver_masked;
1391 	/* Make sure the ENA version and the controller version are at least
1392 	 * as new as the driver expects
1394 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1395 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1396 ENA_REGS_CONTROLLER_VERSION_OFF);
1398 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1399 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1400 pr_err("Reg read timeout occurred\n");
1404 pr_info("ena device version: %d.%d\n",
1405 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1406 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1407 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1409 if (ver < MIN_ENA_VER) {
1410 pr_err("ENA version is lower than the minimal version the driver supports\n");
1414 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1415 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1416 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1417 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1418 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1419 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1420 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1421 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1424 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1425 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1426 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1428 /* Validate the ctrl version without the implementation ID */
1429 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1430 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1437 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1439 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1440 struct ena_com_admin_cq *cq = &admin_queue->cq;
1441 struct ena_com_admin_sq *sq = &admin_queue->sq;
1442 struct ena_com_aenq *aenq = &ena_dev->aenq;
1445 if (admin_queue->comp_ctx)
1446 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1447 admin_queue->comp_ctx = NULL;
1448 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1450 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1454 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1456 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1460 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1461 if (ena_dev->aenq.entries)
1462 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1464 aenq->entries = NULL;
1467 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1472 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1474 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1475 ena_dev->admin_queue.polling = polling;
1478 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1480 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1482 spin_lock_init(&mmio_read->lock);
1483 mmio_read->read_resp =
1484 dma_zalloc_coherent(ena_dev->dmadev,
1485 sizeof(*mmio_read->read_resp),
1486 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1487 if (unlikely(!mmio_read->read_resp))
1490 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1492 mmio_read->read_resp->req_id = 0x0;
1493 mmio_read->seq_num = 0x0;
1494 mmio_read->readless_supported = true;
1499 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1501 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1503 mmio_read->readless_supported = readless_supported;
1506 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1508 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1510 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1511 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1513 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1514 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1516 mmio_read->read_resp = NULL;
1519 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1521 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1522 u32 addr_low, addr_high;
1524 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1525 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1527 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1528 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1531 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1532 struct ena_aenq_handlers *aenq_handlers,
1535 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1536 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1539 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1541 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1542 pr_err("Reg read timeout occurred\n");
1546 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1547 pr_err("Device isn't ready, abort com init\n");
1551 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1553 admin_queue->q_dmadev = ena_dev->dmadev;
1554 admin_queue->polling = false;
1555 admin_queue->curr_cmd_id = 0;
1557 atomic_set(&admin_queue->outstanding_cmds, 0);
1560 spin_lock_init(&admin_queue->q_lock);
1562 ret = ena_com_init_comp_ctxt(admin_queue);
1566 ret = ena_com_admin_init_sq(admin_queue);
1570 ret = ena_com_admin_init_cq(admin_queue);
1574 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1575 ENA_REGS_AQ_DB_OFF);
1577 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1578 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1580 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1581 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1583 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1584 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1586 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1587 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1590 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1591 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1592 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1593 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1596 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1597 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1598 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1599 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1601 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1602 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1603 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1607 admin_queue->running_state = true;
1611 ena_com_admin_destroy(ena_dev);
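/* Bring-up sketch (illustrative; the exact sequence lives in the netdev
 * driver, the local variables below are hypothetical): a typical probe path
 * first enables readless register reads, validates the device and only then
 * creates the admin queue:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (!rc)
 *		rc = ena_com_validate_version(ena_dev);
 *	if (!rc)
 *		rc = ena_com_admin_init(ena_dev, &aenq_handlers, ...);
 *	if (!rc)
 *		rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 *
 * On failure after ena_com_admin_init(), ena_com_admin_destroy() undoes the
 * allocations, exactly as the error path above does.
 */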
1616 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1617 struct ena_com_create_io_ctx *ctx)
1619 struct ena_com_io_sq *io_sq;
1620 struct ena_com_io_cq *io_cq;
1623 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1624 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1625 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1629 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1630 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1632 memset(io_sq, 0x0, sizeof(*io_sq));
1633 memset(io_cq, 0x0, sizeof(*io_cq));
1636 io_cq->q_depth = ctx->queue_size;
1637 io_cq->direction = ctx->direction;
1638 io_cq->qid = ctx->qid;
1640 io_cq->msix_vector = ctx->msix_vector;
1642 io_sq->q_depth = ctx->queue_size;
1643 io_sq->direction = ctx->direction;
1644 io_sq->qid = ctx->qid;
1646 io_sq->mem_queue_type = ctx->mem_queue_type;
1648 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1649 /* header length is limited to 8 bits */
1650 io_sq->tx_max_header_size =
1651 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1653 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1656 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1660 ret = ena_com_create_io_cq(ena_dev, io_cq);
1664 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1671 ena_com_destroy_io_cq(ena_dev, io_cq);
1673 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
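/* Caller sketch (illustrative; the fields are the ones consumed above, the
 * values are hypothetical): creating an IO queue pair amounts to filling
 * struct ena_com_create_io_ctx and then fetching the resulting handlers:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= qid,
 *		.queue_size	= 1024,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= msix_vector,
 *		.numa_node	= dev_to_node(ena_dev->dmadev),
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (!rc)
 *		rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
 */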
1677 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1679 struct ena_com_io_sq *io_sq;
1680 struct ena_com_io_cq *io_cq;
1682 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1683 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1684 ENA_TOTAL_NUM_QUEUES);
1688 io_sq = &ena_dev->io_sq_queues[qid];
1689 io_cq = &ena_dev->io_cq_queues[qid];
1691 ena_com_destroy_io_sq(ena_dev, io_sq);
1692 ena_com_destroy_io_cq(ena_dev, io_cq);
1694 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1697 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1698 struct ena_admin_get_feat_resp *resp)
1700 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1703 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1704 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1706 struct ena_admin_get_feat_resp get_resp;
1709 rc = ena_com_get_feature(ena_dev, &get_resp,
1710 ENA_ADMIN_DEVICE_ATTRIBUTES);
1714 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1715 sizeof(get_resp.u.dev_attr));
1716 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1718 rc = ena_com_get_feature(ena_dev, &get_resp,
1719 ENA_ADMIN_MAX_QUEUES_NUM);
1723 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1724 sizeof(get_resp.u.max_queue));
1725 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1727 rc = ena_com_get_feature(ena_dev, &get_resp,
1728 ENA_ADMIN_AENQ_CONFIG);
1732 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1733 sizeof(get_resp.u.aenq));
1735 rc = ena_com_get_feature(ena_dev, &get_resp,
1736 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1740 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1741 sizeof(get_resp.u.offload));
1743 	/* Driver hints isn't a mandatory admin command, so in case the
1744 	 * command isn't supported, set driver hints to 0
1746 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1749 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1750 sizeof(get_resp.u.hw_hints));
1751 else if (rc == -EOPNOTSUPP)
1752 memset(&get_feat_ctx->hw_hints, 0x0,
1753 sizeof(get_feat_ctx->hw_hints));
1760 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1762 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1765 /* ena_handle_specific_aenq_event:
1766 * return the handler that is relevant to the specific event group
1768 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1771 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1773 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1774 return aenq_handlers->handlers[group];
1776 return aenq_handlers->unimplemented_handler;
1779 /* ena_aenq_intr_handler:
1780 * handles the aenq incoming events.
1781 * pop events from the queue and apply the specific handler
1783 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1785 struct ena_admin_aenq_entry *aenq_e;
1786 struct ena_admin_aenq_common_desc *aenq_common;
1787 struct ena_com_aenq *aenq = &dev->aenq;
1788 ena_aenq_handler handler_cb;
1789 u16 masked_head, processed = 0;
1792 masked_head = aenq->head & (aenq->q_depth - 1);
1793 phase = aenq->phase;
1794 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1795 aenq_common = &aenq_e->aenq_common_desc;
1797 /* Go over all the events */
1798 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
1800 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1801 aenq_common->group, aenq_common->syndrom,
1802 (u64)aenq_common->timestamp_low +
1803 ((u64)aenq_common->timestamp_high << 32));
1805 		/* Handle specific event */
1806 		handler_cb = ena_com_get_specific_aenq_cb(dev,
1807 							  aenq_common->group);
1808 		handler_cb(data, aenq_e); /* call the actual event handler */
1810 /* Get next event entry */
1814 if (unlikely(masked_head == aenq->q_depth)) {
1818 aenq_e = &aenq->entries[masked_head];
1819 aenq_common = &aenq_e->aenq_common_desc;
1822 aenq->head += processed;
1823 aenq->phase = phase;
1825 /* Don't update aenq doorbell if there weren't any processed events */
1829 /* write the aenq doorbell after all AENQ descriptors were read */
1831 writel_relaxed((u32)aenq->head,
1832 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
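/* Handler registration sketch (illustrative; the table layout matches
 * ena_com_get_specific_aenq_cb() above, the handler names are hypothetical):
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
 *		},
 *		.unimplemented_handler = my_unimplemented_handler,
 *	};
 *
 * Groups without a registered callback fall back to unimplemented_handler.
 */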
1836 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
1837 enum ena_regs_reset_reason_types reset_reason)
1839 u32 stat, timeout, cap, reset_val;
1842 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1843 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1845 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
1846 (cap == ENA_MMIO_READ_TIMEOUT))) {
1847 pr_err("Reg read32 timeout occurred\n");
1851 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
1852 pr_err("Device isn't ready, can't reset device\n");
1856 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
1857 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
1859 pr_err("Invalid timeout value\n");
1864 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
1865 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
1866 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
1867 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1869 /* Write again the MMIO read request address */
1870 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1872 rc = wait_for_reset_state(ena_dev, timeout,
1873 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
1875 pr_err("Reset indication didn't turn on\n");
1880 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1881 rc = wait_for_reset_state(ena_dev, timeout, 0);
1883 pr_err("Reset indication didn't turn off\n");
1887 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
1888 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
1890 /* the resolution of timeout reg is 100ms */
1891 ena_dev->admin_queue.completion_timeout = timeout * 100000;
1893 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
1898 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1899 struct ena_com_stats_ctx *ctx,
1900 enum ena_admin_get_stats_type type)
1902 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1903 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1904 struct ena_com_admin_queue *admin_queue;
1907 admin_queue = &ena_dev->admin_queue;
1909 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1910 get_cmd->aq_common_descriptor.flags = 0;
1911 get_cmd->type = type;
1913 ret = ena_com_execute_admin_command(admin_queue,
1914 (struct ena_admin_aq_entry *)get_cmd,
1916 (struct ena_admin_acq_entry *)get_resp,
1920 pr_err("Failed to get stats. error: %d\n", ret);
1925 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1926 struct ena_admin_basic_stats *stats)
1928 struct ena_com_stats_ctx ctx;
1931 memset(&ctx, 0x0, sizeof(ctx));
1932 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1933 if (likely(ret == 0))
1934 memcpy(stats, &ctx.get_resp.basic_stats,
1935 sizeof(ctx.get_resp.basic_stats));
1940 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1942 struct ena_com_admin_queue *admin_queue;
1943 struct ena_admin_set_feat_cmd cmd;
1944 struct ena_admin_set_feat_resp resp;
1947 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
1948 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
1952 memset(&cmd, 0x0, sizeof(cmd));
1953 admin_queue = &ena_dev->admin_queue;
1955 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1956 cmd.aq_common_descriptor.flags = 0;
1957 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1958 cmd.u.mtu.mtu = mtu;
1960 ret = ena_com_execute_admin_command(admin_queue,
1961 (struct ena_admin_aq_entry *)&cmd,
1963 (struct ena_admin_acq_entry *)&resp,
1967 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
1972 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
1973 struct ena_admin_feature_offload_desc *offload)
1976 struct ena_admin_get_feat_resp resp;
1978 ret = ena_com_get_feature(ena_dev, &resp,
1979 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1980 if (unlikely(ret)) {
1981 pr_err("Failed to get offload capabilities %d\n", ret);
1985 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
1990 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
1992 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1993 struct ena_rss *rss = &ena_dev->rss;
1994 struct ena_admin_set_feat_cmd cmd;
1995 struct ena_admin_set_feat_resp resp;
1996 struct ena_admin_get_feat_resp get_resp;
1999 if (!ena_com_check_supported_feature_id(ena_dev,
2000 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2001 pr_debug("Feature %d isn't supported\n",
2002 ENA_ADMIN_RSS_HASH_FUNCTION);
2006 /* Validate hash function is supported */
2007 ret = ena_com_get_feature(ena_dev, &get_resp,
2008 ENA_ADMIN_RSS_HASH_FUNCTION);
2012 	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2013 pr_err("Func hash %d isn't supported by device, abort\n",
2018 memset(&cmd, 0x0, sizeof(cmd));
2020 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2021 cmd.aq_common_descriptor.flags =
2022 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2023 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2024 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2025 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2027 ret = ena_com_mem_addr_set(ena_dev,
2028 &cmd.control_buffer.address,
2029 rss->hash_key_dma_addr);
2030 if (unlikely(ret)) {
2031 pr_err("memory address set failed\n");
2035 cmd.control_buffer.length = sizeof(*rss->hash_key);
2037 ret = ena_com_execute_admin_command(admin_queue,
2038 (struct ena_admin_aq_entry *)&cmd,
2040 (struct ena_admin_acq_entry *)&resp,
2042 if (unlikely(ret)) {
2043 pr_err("Failed to set hash function %d. error: %d\n",
2044 rss->hash_func, ret);
2051 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2052 enum ena_admin_hash_functions func,
2053 const u8 *key, u16 key_len, u32 init_val)
2055 struct ena_rss *rss = &ena_dev->rss;
2056 struct ena_admin_get_feat_resp get_resp;
2057 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2061 	/* Make sure the key size is a multiple of DWORDs */
2062 if (unlikely(key_len & 0x3))
2065 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2066 ENA_ADMIN_RSS_HASH_FUNCTION,
2067 rss->hash_key_dma_addr,
2068 sizeof(*rss->hash_key));
2072 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2073 pr_err("Flow hash function %d isn't supported\n", func);
2078 case ENA_ADMIN_TOEPLITZ:
2079 if (key_len > sizeof(hash_key->key)) {
2080 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2081 key_len, sizeof(hash_key->key));
2085 memcpy(hash_key->key, key, key_len);
2086 rss->hash_init_val = init_val;
2087 hash_key->keys_num = key_len >> 2;
2089 case ENA_ADMIN_CRC32:
2090 rss->hash_init_val = init_val;
2093 pr_err("Invalid hash function (%d)\n", func);
2097 rc = ena_com_set_hash_function(ena_dev);
2099 /* Restore the old function */
2101 ena_com_get_hash_function(ena_dev, NULL, NULL);
2106 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2107 enum ena_admin_hash_functions *func,
2110 struct ena_rss *rss = &ena_dev->rss;
2111 struct ena_admin_get_feat_resp get_resp;
2112 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2116 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2117 ENA_ADMIN_RSS_HASH_FUNCTION,
2118 rss->hash_key_dma_addr,
2119 sizeof(*rss->hash_key));
2123 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2125 *func = rss->hash_func;
2128 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2133 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2134 enum ena_admin_flow_hash_proto proto,
2137 struct ena_rss *rss = &ena_dev->rss;
2138 struct ena_admin_get_feat_resp get_resp;
2141 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2142 ENA_ADMIN_RSS_HASH_INPUT,
2143 rss->hash_ctrl_dma_addr,
2144 sizeof(*rss->hash_ctrl));
2149 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2154 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2156 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2157 struct ena_rss *rss = &ena_dev->rss;
2158 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2159 struct ena_admin_set_feat_cmd cmd;
2160 struct ena_admin_set_feat_resp resp;
2163 if (!ena_com_check_supported_feature_id(ena_dev,
2164 ENA_ADMIN_RSS_HASH_INPUT)) {
2165 pr_debug("Feature %d isn't supported\n",
2166 ENA_ADMIN_RSS_HASH_INPUT);
2170 memset(&cmd, 0x0, sizeof(cmd));
2172 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2173 cmd.aq_common_descriptor.flags =
2174 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2175 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2176 cmd.u.flow_hash_input.enabled_input_sort =
2177 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2178 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2180 ret = ena_com_mem_addr_set(ena_dev,
2181 &cmd.control_buffer.address,
2182 rss->hash_ctrl_dma_addr);
2183 if (unlikely(ret)) {
2184 pr_err("memory address set failed\n");
2187 cmd.control_buffer.length = sizeof(*hash_ctrl);
2189 ret = ena_com_execute_admin_command(admin_queue,
2190 (struct ena_admin_aq_entry *)&cmd,
2192 (struct ena_admin_acq_entry *)&resp,
2195 pr_err("Failed to set hash input. error: %d\n", ret);
2200 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2202 struct ena_rss *rss = &ena_dev->rss;
2203 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2205 u16 available_fields = 0;
2208 /* Get the supported hash input */
2209 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2213 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2214 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2215 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2217 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2218 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2219 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2221 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2222 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2223 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2225 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2226 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2227 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2229 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2230 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2232 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2233 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2235 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2236 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2238 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2239 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2241 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2242 available_fields = hash_ctrl->selected_fields[i].fields &
2243 hash_ctrl->supported_fields[i].fields;
2244 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2245 pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2246 i, hash_ctrl->supported_fields[i].fields,
2247 hash_ctrl->selected_fields[i].fields);
2248 return -EOPNOTSUPP;
2249 }
2250 }
2252 rc = ena_com_set_hash_ctrl(ena_dev);
2254 /* In case of failure, restore the old hash ctrl */
2255 if (unlikely(rc))
2256 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2258 return rc;
2259 }
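/* Usage sketch (illustrative, not taken from the driver sources) for
 * ena_com_fill_hash_ctrl() below: restrict UDP/IPv4 hashing to the IP
 * addresses only, leaving the ports out:
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_UDP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA);
 *
 * The request is rejected with -EOPNOTSUPP if the device does not support
 * every requested field for that protocol.
 */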
2261 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2262 enum ena_admin_flow_hash_proto proto,
2263 u16 hash_fields)
2265 struct ena_rss *rss = &ena_dev->rss;
2266 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2267 u16 supported_fields;
2270 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2271 pr_err("Invalid proto num (%u)\n", proto);
2272 return -EINVAL;
2273 }
2275 /* Get the ctrl table */
2276 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2280 /* Make sure all the fields are supported */
2281 supported_fields = hash_ctrl->supported_fields[proto].fields;
2282 if ((hash_fields & supported_fields) != hash_fields) {
2283 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2284 proto, hash_fields, supported_fields);
2285 return -EOPNOTSUPP;
2286 }
2287 hash_ctrl->selected_fields[proto].fields = hash_fields;
2289 rc = ena_com_set_hash_ctrl(ena_dev);
2291 /* In case of failure, restore the old hash ctrl */
2292 if (unlikely(rc))
2293 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2295 return rc;
2296 }
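/* Usage sketch (illustrative; num_rx_queues and table_size are hypothetical
 * caller variables, not names from this file) for
 * ena_com_indirect_table_fill_entry() below. Spreading the indirection
 * table round-robin over the RX queues looks roughly like:
 *
 *	for (i = 0; i < table_size; i++) {	// table_size = 1 << tbl_log_size
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_rx_queues);
 *		if (unlikely(rc))
 *			break;
 *	}
 *
 * Entries are only staged in host_rss_ind_tbl here; nothing reaches the
 * device until ena_com_indirect_table_set() is called.
 */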
2298 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2299 u16 entry_idx, u16 entry_value)
2301 struct ena_rss *rss = &ena_dev->rss;
2303 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2304 return -EINVAL;
2306 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2307 return -EINVAL;
2309 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2311 return 0;
2312 }
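/* Note (added explanation, hedged): ena_com_indirect_table_set() below
 * converts the staged host table to the device representation and hands it
 * to the device as an indirect control buffer whose length is
 * (1 << tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry),
 * matching the value programmed into cmd.control_buffer.length. For
 * example, with tbl_log_size = 7 the buffer describes 128 entries.
 */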
2314 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2316 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2317 struct ena_rss *rss = &ena_dev->rss;
2318 struct ena_admin_set_feat_cmd cmd;
2319 struct ena_admin_set_feat_resp resp;
2322 if (!ena_com_check_supported_feature_id(
2323 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2324 pr_debug("Feature %d isn't supported\n",
2325 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2329 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2330 if (ret) {
2331 pr_err("Failed to convert host indirection table to device table\n");
2332 return ret;
2333 }
2335 memset(&cmd, 0x0, sizeof(cmd));
2337 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2338 cmd.aq_common_descriptor.flags =
2339 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2340 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2341 cmd.u.ind_table.size = rss->tbl_log_size;
2342 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2344 ret = ena_com_mem_addr_set(ena_dev,
2345 &cmd.control_buffer.address,
2346 rss->rss_ind_tbl_dma_addr);
2347 if (unlikely(ret)) {
2348 pr_err("memory address set failed\n");
2352 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2353 sizeof(struct ena_admin_rss_ind_table_entry);
2355 ret = ena_com_execute_admin_command(admin_queue,
2356 (struct ena_admin_aq_entry *)&cmd,
2357 sizeof(cmd),
2358 (struct ena_admin_acq_entry *)&resp,
2359 sizeof(resp));
2361 if (unlikely(ret))
2362 pr_err("Failed to set indirect table. error: %d\n", ret);
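/* Note (hedged sketch, not from the original sources):
 * ena_com_indirect_table_get() below performs the reverse trip: it fetches
 * the device table via GET_FEATURE into the DMA buffer, converts it back to
 * host queue ids, and copies the result into the caller-supplied array,
 * which therefore must hold at least (1 << tbl_log_size) u32 entries.
 * A caller might do (table_size is a hypothetical local):
 *
 *	u32 *ind_tbl = kcalloc(table_size, sizeof(u32), GFP_KERNEL);
 *
 *	if (ind_tbl)
 *		rc = ena_com_indirect_table_get(ena_dev, ind_tbl);
 */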
2367 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2369 struct ena_rss *rss = &ena_dev->rss;
2370 struct ena_admin_get_feat_resp get_resp;
2374 tbl_size = (1ULL << rss->tbl_log_size) *
2375 sizeof(struct ena_admin_rss_ind_table_entry);
2377 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2378 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2379 rss->rss_ind_tbl_dma_addr,
2387 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2391 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2392 ind_tbl[i] = rss->host_rss_ind_tbl[i];
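/* Note (hedged sketch): ena_com_rss_init() below allocates the three RSS
 * resources in order (indirection table, hash key, hash control) and
 * unwinds through the error labels on failure, so a failed init leaves no
 * partial allocations behind. A probe-time call might look like the sketch
 * here; ENA_RX_RSS_TABLE_LOG_SIZE is the log2 table size the net driver is
 * assumed to define (e.g. 7 for a 128-entry table), and err_rss is a
 * hypothetical label:
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
 *	if (rc)
 *		goto err_rss;
 */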
2397 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2401 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2403 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2404 if (unlikely(rc))
2405 goto err_indr_tbl;
2407 rc = ena_com_hash_key_allocate(ena_dev);
2408 if (unlikely(rc))
2409 goto err_hash_key;
2411 rc = ena_com_hash_ctrl_init(ena_dev);
2412 if (unlikely(rc))
2413 goto err_hash_ctrl;
2415 return 0;
2417 err_hash_ctrl:
2418 ena_com_hash_key_destroy(ena_dev);
2419 err_hash_key:
2420 ena_com_indirect_table_destroy(ena_dev);
2421 err_indr_tbl:
2423 return rc;
2424 }
2426 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2428 ena_com_indirect_table_destroy(ena_dev);
2429 ena_com_hash_key_destroy(ena_dev);
2430 ena_com_hash_ctrl_destroy(ena_dev);
2432 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
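/* Note (hedged sketch, only calls visible in this file are used):
 * ena_com_allocate_host_info() below reserves a 4 KiB DMA-coherent page
 * that the device reads after ena_com_set_host_attributes() points it
 * there. The expected lifecycle is roughly:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	// ... fill ena_dev->host_attr.host_info ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		ena_com_delete_host_info(ena_dev);
 */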
2435 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2437 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2439 host_attr->host_info =
2440 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2441 &host_attr->host_info_dma_addr, GFP_KERNEL);
2442 if (unlikely(!host_attr->host_info))
2443 return -ENOMEM;
2445 return 0;
2446 }
2448 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2449 u32 debug_area_size)
2451 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2453 host_attr->debug_area_virt_addr =
2454 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2455 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2456 if (unlikely(!host_attr->debug_area_virt_addr)) {
2457 host_attr->debug_area_size = 0;
2458 return -ENOMEM;
2459 }
2461 host_attr->debug_area_size = debug_area_size;
2463 return 0;
2464 }
2466 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2468 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2470 if (host_attr->host_info) {
2471 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2472 host_attr->host_info_dma_addr);
2473 host_attr->host_info = NULL;
2477 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2479 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2481 if (host_attr->debug_area_virt_addr) {
2482 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2483 host_attr->debug_area_virt_addr,
2484 host_attr->debug_area_dma_addr);
2485 host_attr->debug_area_virt_addr = NULL;
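/* Note (hedged sketch): ena_com_set_host_attributes() below binds both
 * buffers (debug area and host info) to the device in one
 * SET_FEATURE(ENA_ADMIN_HOST_ATTR_CONFIG) command. As the in-code comment
 * notes, it may run before the device's feature list has been fetched, so
 * unlike the other setters it skips ena_com_check_supported_feature_id()
 * and simply lets the admin command fail on older devices. A typical
 * pairing with the debug area:
 *
 *	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *	if (!rc)
 *		rc = ena_com_set_host_attributes(ena_dev);
 */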
2489 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2491 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2492 struct ena_com_admin_queue *admin_queue;
2493 struct ena_admin_set_feat_cmd cmd;
2494 struct ena_admin_set_feat_resp resp;
2498 /* Host attribute config is called before ena_com_get_dev_attr_feat
2499 * so ena_com can't check if the feature is supported.
2500 */
2502 memset(&cmd, 0x0, sizeof(cmd));
2503 admin_queue = &ena_dev->admin_queue;
2505 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2506 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2508 ret = ena_com_mem_addr_set(ena_dev,
2509 &cmd.u.host_attr.debug_ba,
2510 host_attr->debug_area_dma_addr);
2511 if (unlikely(ret)) {
2512 pr_err("memory address set failed\n");
2516 ret = ena_com_mem_addr_set(ena_dev,
2517 &cmd.u.host_attr.os_info_ba,
2518 host_attr->host_info_dma_addr);
2519 if (unlikely(ret)) {
2520 pr_err("memory address set failed\n");
2524 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2526 ret = ena_com_execute_admin_command(admin_queue,
2527 (struct ena_admin_aq_entry *)&cmd,
2528 sizeof(cmd),
2529 (struct ena_admin_acq_entry *)&resp,
2530 sizeof(resp));
2532 if (unlikely(ret))
2533 pr_err("Failed to set host attributes: %d\n", ret);
2538 /* Interrupt moderation */
2539 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2541 return ena_com_check_supported_feature_id(ena_dev,
2542 ENA_ADMIN_INTERRUPT_MODERATION);
2545 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2546 u32 tx_coalesce_usecs)
2548 if (!ena_dev->intr_delay_resolution) {
2549 pr_err("Illegal interrupt delay granularity value\n");
2553 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2554 ena_dev->intr_delay_resolution;
2559 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2560 u32 rx_coalesce_usecs)
2562 if (!ena_dev->intr_delay_resolution) {
2563 pr_err("Illegal interrupt delay granularity value\n");
2567 /* We use LOWEST entry of moderation table for storing
2568 * nonadaptive interrupt coalescing values
2569 */
2570 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2571 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2576 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2578 if (ena_dev->intr_moder_tbl)
2579 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2580 ena_dev->intr_moder_tbl = NULL;
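/* Note (hedged sketch): ena_com_init_interrupt_moderation() below treats a
 * missing ENA_ADMIN_INTERRUPT_MODERATION feature as a soft failure:
 * adaptive moderation is disabled and 0 is returned, so probe can continue
 * with fixed (nonadaptive) intervals; any other admin failure is
 * propagated. A caller sketch (err_moderation is a hypothetical label):
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (rc)
 *		goto err_moderation;
 *
 *	if (!ena_com_interrupt_moderation_supported(ena_dev))
 *		;	// fall back to static coalescing only
 */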
2583 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2585 struct ena_admin_get_feat_resp get_resp;
2586 u16 delay_resolution;
2589 rc = ena_com_get_feature(ena_dev, &get_resp,
2590 ENA_ADMIN_INTERRUPT_MODERATION);
2592 if (rc) {
2593 if (rc == -EOPNOTSUPP) {
2594 pr_debug("Feature %d isn't supported\n",
2595 ENA_ADMIN_INTERRUPT_MODERATION);
2596 rc = 0;
2597 } else {
2598 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2599 rc);
2600 }
2602 /* no moderation supported, disable adaptive support */
2603 ena_com_disable_adaptive_moderation(ena_dev);
2604 return rc;
2605 }
2607 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2608 if (rc)
2609 goto err;
2611 /* if moderation is supported by device we set adaptive moderation */
2612 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2613 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2614 ena_com_enable_adaptive_moderation(ena_dev);
2616 return 0;
2617 err:
2618 ena_com_destroy_interrupt_moderation(ena_dev);
2619 return rc;
2620 }
2622 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2624 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2626 if (!intr_moder_tbl)
2629 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2630 ENA_INTR_LOWEST_USECS;
2631 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2632 ENA_INTR_LOWEST_PKTS;
2633 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2634 ENA_INTR_LOWEST_BYTES;
2636 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2637 ENA_INTR_LOW_USECS;
2638 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2639 ENA_INTR_LOW_PKTS;
2640 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2641 ENA_INTR_LOW_BYTES;
2643 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2644 ENA_INTR_MID_USECS;
2645 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2646 ENA_INTR_MID_PKTS;
2647 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2648 ENA_INTR_MID_BYTES;
2650 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2651 ENA_INTR_HIGH_USECS;
2652 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2653 ENA_INTR_HIGH_PKTS;
2654 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2655 ENA_INTR_HIGH_BYTES;
2657 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2658 ENA_INTR_HIGHEST_USECS;
2659 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2660 ENA_INTR_HIGHEST_PKTS;
2661 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2662 ENA_INTR_HIGHEST_BYTES;
2665 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2667 return ena_dev->intr_moder_tx_interval;
2670 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2672 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2675 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
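/* Note (added explanation): the two helpers below convert between the
 * caller's microsecond view and the table's internal units. On set, the
 * interval is divided by intr_delay_resolution; on get, it is multiplied
 * back. A worked example with an assumed resolution of 4 us per unit:
 * requesting a 64 us interval stores 16 in the table, and reading the
 * entry back reports 64 us again. bytes_per_interval is only updated when
 * the caller passes something other than ENA_INTR_BYTE_COUNT_NOT_SUPPORTED.
 */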
2680 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2681 enum ena_intr_moder_level level,
2682 struct ena_intr_moder_entry *entry)
2684 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2686 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2689 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2690 if (ena_dev->intr_delay_resolution)
2691 intr_moder_tbl[level].intr_moder_interval /=
2692 ena_dev->intr_delay_resolution;
2693 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2695 /* use hardcoded value until ethtool supports bytecount parameter */
2696 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2697 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2700 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2701 enum ena_intr_moder_level level,
2702 struct ena_intr_moder_entry *entry)
2704 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2706 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2709 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2710 if (ena_dev->intr_delay_resolution)
2711 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2712 entry->pkts_per_interval =
2713 intr_moder_tbl[level].pkts_per_interval;
2714 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;