/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1
#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
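/* Illustrative sketch (not part of the driver logic): these helpers split a
 * 64-bit DMA address into the two 32-bit halves that are written to adjacent
 * LO/HI device registers, e.g. the AENQ base address programming later in
 * this file:
 *
 *	addr_low  = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 *	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 *	writel(addr_low,  dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 *	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 */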
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
74 /* Abort - canceled by the driver */
79 struct completion wait_event;
80 struct ena_admin_acq_entry *user_cqe;
82 enum ena_cmd_status status;
83 /* status from the device */
89 struct ena_com_stats_ctx {
90 struct ena_admin_aq_get_stats_cmd get_cmd;
91 struct ena_admin_acq_get_stats_resp get_resp;
94 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
95 struct ena_common_mem_addr *ena_addr,
98 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
99 pr_err("dma address has more bits that the device supports\n");
103 ena_addr->mem_addr_low = lower_32_bits(addr);
104 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
109 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
118 pr_err("memory allocation failed\n");
131 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
140 pr_err("memory allocation failed\n");
150 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
151 struct ena_aenq_handlers *aenq_handlers)
153 struct ena_com_aenq *aenq = &dev->aenq;
154 u32 addr_low, addr_high, aenq_caps;
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
162 if (!aenq->entries) {
163 pr_err("memory allocation failed\n");
167 aenq->head = aenq->q_depth;
170 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
171 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
173 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
174 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
178 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
179 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
180 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
181 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
183 if (unlikely(!aenq_handlers)) {
184 pr_err("aenq handlers pointer is NULL\n");
188 aenq->aenq_handlers = aenq_handlers;
193 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
194 struct ena_comp_ctx *comp_ctx)
196 comp_ctx->occupied = false;
197 atomic_dec(&queue->outstanding_cmds);
200 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
201 u16 command_id, bool capture)
203 if (unlikely(!queue->comp_ctx)) {
204 pr_err("Completion context is NULL\n");
208 if (unlikely(command_id >= queue->q_depth)) {
209 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
210 command_id, queue->q_depth);
214 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
215 pr_err("Completion context is occupied\n");
220 atomic_inc(&queue->outstanding_cmds);
221 queue->comp_ctx[command_id].occupied = true;
224 return &queue->comp_ctx[command_id];
227 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
228 struct ena_admin_aq_entry *cmd,
229 size_t cmd_size_in_bytes,
230 struct ena_admin_acq_entry *comp,
231 size_t comp_size_in_bytes)
233 struct ena_comp_ctx *comp_ctx;
234 u16 tail_masked, cmd_id;
238 queue_size_mask = admin_queue->q_depth - 1;
240 tail_masked = admin_queue->sq.tail & queue_size_mask;
242 /* In case of queue FULL */
243 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
244 if (cnt >= admin_queue->q_depth) {
245 pr_debug("admin queue is full.\n");
246 admin_queue->stats.out_of_space++;
247 return ERR_PTR(-ENOSPC);
250 cmd_id = admin_queue->curr_cmd_id;
252 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
253 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
255 cmd->aq_common_descriptor.command_id |= cmd_id &
256 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
258 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
259 if (unlikely(!comp_ctx))
260 return ERR_PTR(-EINVAL);
262 comp_ctx->status = ENA_CMD_SUBMITTED;
263 comp_ctx->comp_size = (u32)comp_size_in_bytes;
264 comp_ctx->user_cqe = comp;
265 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
267 reinit_completion(&comp_ctx->wait_event);
269 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
271 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
274 admin_queue->sq.tail++;
275 admin_queue->stats.submitted_cmd++;
277 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
278 admin_queue->sq.phase = !admin_queue->sq.phase;
280 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
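/* Submission sketch (informal, based on the function above): with q_depth a
 * power of two, the producer side behaves roughly like
 *
 *	sq.entries[tail & (q_depth - 1)] = *cmd;
 *	tail++;
 *	if ((tail & (q_depth - 1)) == 0)
 *		phase = !phase;		  (phase bit flips on every wrap)
 *	writel(tail, sq.db_addr);	  (doorbell tells the device to fetch)
 */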
285 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
287 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
288 struct ena_comp_ctx *comp_ctx;
291 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
292 if (unlikely(!queue->comp_ctx)) {
293 pr_err("memory allocation failed\n");
297 for (i = 0; i < queue->q_depth; i++) {
298 comp_ctx = get_comp_ctxt(queue, i, false);
300 init_completion(&comp_ctx->wait_event);
306 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
307 struct ena_admin_aq_entry *cmd,
308 size_t cmd_size_in_bytes,
309 struct ena_admin_acq_entry *comp,
310 size_t comp_size_in_bytes)
312 unsigned long flags = 0;
313 struct ena_comp_ctx *comp_ctx;
315 spin_lock_irqsave(&admin_queue->q_lock, flags);
316 if (unlikely(!admin_queue->running_state)) {
317 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
318 return ERR_PTR(-ENODEV);
320 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
324 if (IS_ERR(comp_ctx))
325 admin_queue->running_state = false;
326 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
331 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
332 struct ena_com_create_io_ctx *ctx,
333 struct ena_com_io_sq *io_sq)
338 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
340 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
341 io_sq->desc_entry_size =
342 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
343 sizeof(struct ena_eth_io_tx_desc) :
344 sizeof(struct ena_eth_io_rx_desc);
346 size = io_sq->desc_entry_size * io_sq->q_depth;
348 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
349 dev_node = dev_to_node(ena_dev->dmadev);
350 set_dev_node(ena_dev->dmadev, ctx->numa_node);
351 io_sq->desc_addr.virt_addr =
352 dma_alloc_coherent(ena_dev->dmadev, size,
353 &io_sq->desc_addr.phys_addr,
355 set_dev_node(ena_dev->dmadev, dev_node);
356 if (!io_sq->desc_addr.virt_addr) {
357 io_sq->desc_addr.virt_addr =
358 dma_alloc_coherent(ena_dev->dmadev, size,
359 &io_sq->desc_addr.phys_addr,
363 if (!io_sq->desc_addr.virt_addr) {
364 pr_err("memory allocation failed\n");
369 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
370 /* Allocate bounce buffers */
371 io_sq->bounce_buf_ctrl.buffer_size =
372 ena_dev->llq_info.desc_list_entry_size;
373 io_sq->bounce_buf_ctrl.buffers_num =
374 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
375 io_sq->bounce_buf_ctrl.next_to_use = 0;
377 size = io_sq->bounce_buf_ctrl.buffer_size *
378 io_sq->bounce_buf_ctrl.buffers_num;
380 dev_node = dev_to_node(ena_dev->dmadev);
381 set_dev_node(ena_dev->dmadev, ctx->numa_node);
382 io_sq->bounce_buf_ctrl.base_buffer =
383 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
384 set_dev_node(ena_dev->dmadev, dev_node);
385 if (!io_sq->bounce_buf_ctrl.base_buffer)
386 io_sq->bounce_buf_ctrl.base_buffer =
387 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
389 if (!io_sq->bounce_buf_ctrl.base_buffer) {
390 pr_err("bounce buffer memory allocation failed\n");
394 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
395 sizeof(io_sq->llq_info));
		/* Initialize the first bounce buffer */
398 io_sq->llq_buf_ctrl.curr_bounce_buf =
399 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
400 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
401 0x0, io_sq->llq_info.desc_list_entry_size);
402 io_sq->llq_buf_ctrl.descs_left_in_line =
403 io_sq->llq_info.descs_num_before_header;
405 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
406 io_sq->entries_in_tx_burst_left =
407 io_sq->llq_info.max_entries_in_tx_burst;
411 io_sq->next_to_comp = 0;
417 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
418 struct ena_com_create_io_ctx *ctx,
419 struct ena_com_io_cq *io_cq)
424 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
426 /* Use the basic completion descriptor for Rx */
427 io_cq->cdesc_entry_size_in_bytes =
428 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
429 sizeof(struct ena_eth_io_tx_cdesc) :
430 sizeof(struct ena_eth_io_rx_cdesc_base);
432 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
434 prev_node = dev_to_node(ena_dev->dmadev);
435 set_dev_node(ena_dev->dmadev, ctx->numa_node);
436 io_cq->cdesc_addr.virt_addr =
437 dma_alloc_coherent(ena_dev->dmadev, size,
438 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
439 set_dev_node(ena_dev->dmadev, prev_node);
440 if (!io_cq->cdesc_addr.virt_addr) {
441 io_cq->cdesc_addr.virt_addr =
442 dma_alloc_coherent(ena_dev->dmadev, size,
443 &io_cq->cdesc_addr.phys_addr,
447 if (!io_cq->cdesc_addr.virt_addr) {
448 pr_err("memory allocation failed\n");
458 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
459 struct ena_admin_acq_entry *cqe)
461 struct ena_comp_ctx *comp_ctx;
464 cmd_id = cqe->acq_common_descriptor.command &
465 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
467 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
468 if (unlikely(!comp_ctx)) {
469 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
470 admin_queue->running_state = false;
474 comp_ctx->status = ENA_CMD_COMPLETED;
475 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
477 if (comp_ctx->user_cqe)
478 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
480 if (!admin_queue->polling)
481 complete(&comp_ctx->wait_event);
484 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
486 struct ena_admin_acq_entry *cqe = NULL;
491 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
492 phase = admin_queue->cq.phase;
494 cqe = &admin_queue->cq.entries[head_masked];
496 /* Go over all the completions */
497 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
498 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry until the
		 * phase bit has been validated
503 ena_com_handle_single_admin_completion(admin_queue, cqe);
507 if (unlikely(head_masked == admin_queue->q_depth)) {
512 cqe = &admin_queue->cq.entries[head_masked];
515 admin_queue->cq.head += comp_num;
516 admin_queue->cq.phase = phase;
517 admin_queue->sq.head += comp_num;
518 admin_queue->stats.completed_cmd += comp_num;
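/* Completion-side sketch (informal): entries are consumed while their phase
 * bit matches the expected cq.phase; when the masked head wraps around,
 * the expected phase flips, mirroring the producer logic in
 * __ena_com_submit_admin_cmd(). sq.head is advanced by the same count so the
 * submission path can tell how much SQ space the device has released.
 */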
521 static int ena_com_comp_status_to_errno(u8 comp_status)
523 if (unlikely(comp_status != 0))
524 pr_err("admin command failed[%u]\n", comp_status);
526 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
529 switch (comp_status) {
530 case ENA_ADMIN_SUCCESS:
532 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
534 case ENA_ADMIN_UNSUPPORTED_OPCODE:
536 case ENA_ADMIN_BAD_OPCODE:
537 case ENA_ADMIN_MALFORMED_REQUEST:
538 case ENA_ADMIN_ILLEGAL_PARAMETER:
539 case ENA_ADMIN_UNKNOWN_ERROR:
546 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
547 struct ena_com_admin_queue *admin_queue)
549 unsigned long flags = 0;
550 unsigned long timeout;
553 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
556 spin_lock_irqsave(&admin_queue->q_lock, flags);
557 ena_com_handle_admin_completion(admin_queue);
558 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
560 if (comp_ctx->status != ENA_CMD_SUBMITTED)
563 if (time_is_before_jiffies(timeout)) {
564 pr_err("Wait for completion (polling) timeout\n");
565 /* ENA didn't have any completion */
566 spin_lock_irqsave(&admin_queue->q_lock, flags);
567 admin_queue->stats.no_completion++;
568 admin_queue->running_state = false;
569 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
578 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
579 pr_err("Command was aborted\n");
580 spin_lock_irqsave(&admin_queue->q_lock, flags);
581 admin_queue->stats.aborted_cmd++;
582 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
587 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
590 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
592 comp_ctxt_release(admin_queue, comp_ctx);
597 * Set the LLQ configurations of the firmware
599 * The driver provides only the enabled feature values to the device,
600 * which in turn, checks if they are supported.
602 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
604 struct ena_com_admin_queue *admin_queue;
605 struct ena_admin_set_feat_cmd cmd;
606 struct ena_admin_set_feat_resp resp;
607 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
610 memset(&cmd, 0x0, sizeof(cmd));
611 admin_queue = &ena_dev->admin_queue;
613 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
614 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
616 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
617 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
618 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
619 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
621 ret = ena_com_execute_admin_command(admin_queue,
622 (struct ena_admin_aq_entry *)&cmd,
624 (struct ena_admin_acq_entry *)&resp,
628 pr_err("Failed to set LLQ configurations: %d\n", ret);
633 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
634 struct ena_admin_feature_llq_desc *llq_features,
635 struct ena_llq_configurations *llq_default_cfg)
637 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
641 memset(llq_info, 0, sizeof(*llq_info));
643 supported_feat = llq_features->header_location_ctrl_supported;
645 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
646 llq_info->header_location_ctrl =
647 llq_default_cfg->llq_header_location;
649 pr_err("Invalid header location control, supported: 0x%x\n",
654 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
655 supported_feat = llq_features->descriptors_stride_ctrl_supported;
656 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
657 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
659 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
660 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
661 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
662 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
664 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
669 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
670 llq_default_cfg->llq_stride_ctrl, supported_feat,
671 llq_info->desc_stride_ctrl);
674 llq_info->desc_stride_ctrl = 0;
677 supported_feat = llq_features->entry_size_ctrl_supported;
678 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
679 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
680 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
682 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
683 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
684 llq_info->desc_list_entry_size = 128;
685 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
686 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
687 llq_info->desc_list_entry_size = 192;
688 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
689 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
690 llq_info->desc_list_entry_size = 256;
692 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
697 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
698 llq_default_cfg->llq_ring_entry_size, supported_feat,
699 llq_info->desc_list_entry_size);
701 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
702 /* The desc list entry size should be whole multiply of 8
703 * This requirement comes from __iowrite64_copy()
705 pr_err("illegal entry size %d\n",
706 llq_info->desc_list_entry_size);
710 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
711 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
712 sizeof(struct ena_eth_io_tx_desc);
714 llq_info->descs_per_entry = 1;
716 supported_feat = llq_features->desc_num_before_header_supported;
717 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
718 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
720 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
721 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
722 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
723 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
724 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
725 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
726 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
727 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
729 pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
734 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
735 llq_default_cfg->llq_num_decs_before_header,
736 supported_feat, llq_info->descs_num_before_header);
739 llq_info->max_entries_in_tx_burst =
740 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
742 rc = ena_com_set_llq(ena_dev);
744 pr_err("Cannot set LLQ configuration: %d\n", rc);
749 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
750 struct ena_com_admin_queue *admin_queue)
752 unsigned long flags = 0;
755 wait_for_completion_timeout(&comp_ctx->wait_event,
757 admin_queue->completion_timeout));
	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
764 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
765 spin_lock_irqsave(&admin_queue->q_lock, flags);
766 ena_com_handle_admin_completion(admin_queue);
767 admin_queue->stats.no_completion++;
768 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
770 if (comp_ctx->status == ENA_CMD_COMPLETED) {
771 pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
772 comp_ctx->cmd_opcode,
773 admin_queue->auto_polling ? "ON" : "OFF");
774 /* Check if fallback to polling is enabled */
775 if (admin_queue->auto_polling)
776 admin_queue->polling = true;
778 pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
779 comp_ctx->cmd_opcode, comp_ctx->status);
		/* Check if we shifted to polling mode.
		 * This happens if there is a completion without an interrupt
		 * and autopolling mode is enabled. In that case, continue
		 * normal execution.
785 if (!admin_queue->polling) {
786 admin_queue->running_state = false;
792 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
794 comp_ctxt_release(admin_queue, comp_ctx);
/* This method reads the hardware device register by posting writes
 * and waiting for a response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
802 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
804 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
805 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
806 mmio_read->read_resp;
807 u32 mmio_read_reg, ret, i;
808 unsigned long flags = 0;
809 u32 timeout = mmio_read->reg_read_to;
814 timeout = ENA_REG_READ_TIMEOUT;
816 /* If readless is disabled, perform regular read */
817 if (!mmio_read->readless_supported)
818 return readl(ena_dev->reg_bar + offset);
820 spin_lock_irqsave(&mmio_read->lock, flags);
821 mmio_read->seq_num++;
823 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
824 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
825 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
826 mmio_read_reg |= mmio_read->seq_num &
827 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
829 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
831 for (i = 0; i < timeout; i++) {
832 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
838 if (unlikely(i == timeout)) {
839 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
840 mmio_read->seq_num, offset, read_resp->req_id,
842 ret = ENA_MMIO_READ_TIMEOUT;
846 if (read_resp->reg_off != offset) {
847 pr_err("Read failure: wrong offset provided\n");
848 ret = ENA_MMIO_READ_TIMEOUT;
850 ret = read_resp->reg_val;
853 spin_unlock_irqrestore(&mmio_read->lock, flags);
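/* Usage sketch: register reads elsewhere in this file go through this helper
 * and must check for the timeout sentinel, e.g.:
 *
 *	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
 *	if (unlikely(ver == ENA_MMIO_READ_TIMEOUT))
 *		return -ETIME;	  (timeout-style errno, as used by callers here)
 */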
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion()
 * to mark the completions.
865 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
866 struct ena_com_admin_queue *admin_queue)
868 if (admin_queue->polling)
869 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
872 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
876 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
877 struct ena_com_io_sq *io_sq)
879 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
880 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
881 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
885 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
887 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
888 direction = ENA_ADMIN_SQ_DIRECTION_TX;
890 direction = ENA_ADMIN_SQ_DIRECTION_RX;
892 destroy_cmd.sq.sq_identity |= (direction <<
893 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
894 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
896 destroy_cmd.sq.sq_idx = io_sq->idx;
897 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
899 ret = ena_com_execute_admin_command(admin_queue,
900 (struct ena_admin_aq_entry *)&destroy_cmd,
902 (struct ena_admin_acq_entry *)&destroy_resp,
903 sizeof(destroy_resp));
905 if (unlikely(ret && (ret != -ENODEV)))
906 pr_err("failed to destroy io sq error: %d\n", ret);
911 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
912 struct ena_com_io_sq *io_sq,
913 struct ena_com_io_cq *io_cq)
917 if (io_cq->cdesc_addr.virt_addr) {
918 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
920 dma_free_coherent(ena_dev->dmadev, size,
921 io_cq->cdesc_addr.virt_addr,
922 io_cq->cdesc_addr.phys_addr);
924 io_cq->cdesc_addr.virt_addr = NULL;
927 if (io_sq->desc_addr.virt_addr) {
928 size = io_sq->desc_entry_size * io_sq->q_depth;
930 dma_free_coherent(ena_dev->dmadev, size,
931 io_sq->desc_addr.virt_addr,
932 io_sq->desc_addr.phys_addr);
934 io_sq->desc_addr.virt_addr = NULL;
937 if (io_sq->bounce_buf_ctrl.base_buffer) {
938 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
939 io_sq->bounce_buf_ctrl.base_buffer = NULL;
943 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
948 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
949 timeout = (timeout * 100) / ENA_POLL_MS;
951 for (i = 0; i < timeout; i++) {
952 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
954 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
955 pr_err("Reg read timeout occurred\n");
959 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
969 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
970 enum ena_admin_aq_feature_id feature_id)
972 u32 feature_mask = 1 << feature_id;
	/* Device attributes are always supported */
975 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
976 !(ena_dev->supported_features & feature_mask))
982 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
983 struct ena_admin_get_feat_resp *get_resp,
984 enum ena_admin_aq_feature_id feature_id,
985 dma_addr_t control_buf_dma_addr,
986 u32 control_buff_size,
989 struct ena_com_admin_queue *admin_queue;
990 struct ena_admin_get_feat_cmd get_cmd;
993 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
994 pr_debug("Feature %d isn't supported\n", feature_id);
998 memset(&get_cmd, 0x0, sizeof(get_cmd));
999 admin_queue = &ena_dev->admin_queue;
1001 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1003 if (control_buff_size)
1004 get_cmd.aq_common_descriptor.flags =
1005 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1007 get_cmd.aq_common_descriptor.flags = 0;
1009 ret = ena_com_mem_addr_set(ena_dev,
1010 &get_cmd.control_buffer.address,
1011 control_buf_dma_addr);
1012 if (unlikely(ret)) {
1013 pr_err("memory address set failed\n");
1017 get_cmd.control_buffer.length = control_buff_size;
1018 get_cmd.feat_common.feature_version = feature_ver;
1019 get_cmd.feat_common.feature_id = feature_id;
1021 ret = ena_com_execute_admin_command(admin_queue,
1022 (struct ena_admin_aq_entry *)
1025 (struct ena_admin_acq_entry *)
1030 pr_err("Failed to submit get_feature command %d error: %d\n",
1036 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1037 struct ena_admin_get_feat_resp *get_resp,
1038 enum ena_admin_aq_feature_id feature_id,
1041 return ena_com_get_feature_ex(ena_dev,
1049 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1051 return ena_dev->rss.hash_func;
1054 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1056 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1057 (ena_dev->rss).hash_key;
1059 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
	/* The key is stored in the device as an array of u32, and the API
	 * requires the key to be passed in the same format. Thus the number
	 * of entries is the key size divided by the size of u32.
1064 hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
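	/* Worked example (illustrative): for a 40-byte Toeplitz-style key,
	 * keys_num = 40 / 4 = 10 u32 words; the actual key size is whatever
	 * sizeof(hash_key->key) evaluates to.
	 */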
1067 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1069 struct ena_rss *rss = &ena_dev->rss;
1070 struct ena_admin_get_feat_resp get_resp;
1073 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
1074 ENA_ADMIN_RSS_HASH_FUNCTION,
1075 ena_dev->rss.hash_key_dma_addr,
1076 sizeof(ena_dev->rss.hash_key), 0);
1082 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1083 &rss->hash_key_dma_addr, GFP_KERNEL);
1085 if (unlikely(!rss->hash_key))
1091 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1093 struct ena_rss *rss = &ena_dev->rss;
1096 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1097 rss->hash_key, rss->hash_key_dma_addr);
1098 rss->hash_key = NULL;
1101 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1103 struct ena_rss *rss = &ena_dev->rss;
1106 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1107 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1109 if (unlikely(!rss->hash_ctrl))
1115 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1117 struct ena_rss *rss = &ena_dev->rss;
1120 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1121 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1122 rss->hash_ctrl = NULL;
1125 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1128 struct ena_rss *rss = &ena_dev->rss;
1129 struct ena_admin_get_feat_resp get_resp;
1133 ret = ena_com_get_feature(ena_dev, &get_resp,
1134 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1138 if ((get_resp.u.ind_table.min_size > log_size) ||
1139 (get_resp.u.ind_table.max_size < log_size)) {
1140 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1141 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1142 1 << get_resp.u.ind_table.max_size);
1146 tbl_size = (1ULL << log_size) *
1147 sizeof(struct ena_admin_rss_ind_table_entry);
1150 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1151 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1152 if (unlikely(!rss->rss_ind_tbl))
1155 tbl_size = (1ULL << log_size) * sizeof(u16);
1156 rss->host_rss_ind_tbl =
1157 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1158 if (unlikely(!rss->host_rss_ind_tbl))
1161 rss->tbl_log_size = log_size;
1166 tbl_size = (1ULL << log_size) *
1167 sizeof(struct ena_admin_rss_ind_table_entry);
1169 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1170 rss->rss_ind_tbl_dma_addr);
1171 rss->rss_ind_tbl = NULL;
1173 rss->tbl_log_size = 0;
1177 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1179 struct ena_rss *rss = &ena_dev->rss;
1180 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1181 sizeof(struct ena_admin_rss_ind_table_entry);
1183 if (rss->rss_ind_tbl)
1184 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1185 rss->rss_ind_tbl_dma_addr);
1186 rss->rss_ind_tbl = NULL;
1188 if (rss->host_rss_ind_tbl)
1189 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1190 rss->host_rss_ind_tbl = NULL;
1193 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1194 struct ena_com_io_sq *io_sq, u16 cq_idx)
1196 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1197 struct ena_admin_aq_create_sq_cmd create_cmd;
1198 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1202 memset(&create_cmd, 0x0, sizeof(create_cmd));
1204 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1206 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1207 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1209 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1211 create_cmd.sq_identity |= (direction <<
1212 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1213 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1215 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1216 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1218 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1219 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1220 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1222 create_cmd.sq_caps_3 |=
1223 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1225 create_cmd.cq_idx = cq_idx;
1226 create_cmd.sq_depth = io_sq->q_depth;
1228 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1229 ret = ena_com_mem_addr_set(ena_dev,
1231 io_sq->desc_addr.phys_addr);
1232 if (unlikely(ret)) {
1233 pr_err("memory address set failed\n");
1238 ret = ena_com_execute_admin_command(admin_queue,
1239 (struct ena_admin_aq_entry *)&create_cmd,
1241 (struct ena_admin_acq_entry *)&cmd_completion,
1242 sizeof(cmd_completion));
1243 if (unlikely(ret)) {
1244 pr_err("Failed to create IO SQ. error: %d\n", ret);
1248 io_sq->idx = cmd_completion.sq_idx;
1250 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1251 (uintptr_t)cmd_completion.sq_doorbell_offset);
1253 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1254 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1255 + cmd_completion.llq_headers_offset);
1257 io_sq->desc_addr.pbuf_dev_addr =
1258 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1259 cmd_completion.llq_descriptors_offset);
1262 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1267 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1269 struct ena_rss *rss = &ena_dev->rss;
1270 struct ena_com_io_sq *io_sq;
1274 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1275 qid = rss->host_rss_ind_tbl[i];
1276 if (qid >= ENA_TOTAL_NUM_QUEUES)
1279 io_sq = &ena_dev->io_sq_queues[qid];
1281 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1284 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1290 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1291 u16 intr_delay_resolution)
1293 /* Initial value of intr_delay_resolution might be 0 */
1294 u16 prev_intr_delay_resolution =
1295 ena_dev->intr_delay_resolution ?
1296 ena_dev->intr_delay_resolution :
1297 ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1299 if (!intr_delay_resolution) {
1300 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1301 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1305 ena_dev->intr_moder_rx_interval =
1306 ena_dev->intr_moder_rx_interval *
1307 prev_intr_delay_resolution /
1308 intr_delay_resolution;
1311 ena_dev->intr_moder_tx_interval =
1312 ena_dev->intr_moder_tx_interval *
1313 prev_intr_delay_resolution /
1314 intr_delay_resolution;
1316 ena_dev->intr_delay_resolution = intr_delay_resolution;
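	/* Worked example (illustrative): if the previous resolution was the
	 * 1 usec default and the device now reports a resolution of 4, a
	 * stored interval of 64 is rescaled to 64 * 1 / 4 = 16, so the
	 * moderation time expressed in microseconds stays the same.
	 */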
1319 /*****************************************************************************/
1320 /******************************* API ******************************/
1321 /*****************************************************************************/
1323 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1324 struct ena_admin_aq_entry *cmd,
1326 struct ena_admin_acq_entry *comp,
1329 struct ena_comp_ctx *comp_ctx;
1332 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1334 if (IS_ERR(comp_ctx)) {
1335 if (comp_ctx == ERR_PTR(-ENODEV))
1336 pr_debug("Failed to submit command [%ld]\n",
1339 pr_err("Failed to submit command [%ld]\n",
1342 return PTR_ERR(comp_ctx);
1345 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1346 if (unlikely(ret)) {
1347 if (admin_queue->running_state)
1348 pr_err("Failed to process command. ret = %d\n", ret);
1350 pr_debug("Failed to process command. ret = %d\n", ret);
1355 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1356 struct ena_com_io_cq *io_cq)
1358 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1359 struct ena_admin_aq_create_cq_cmd create_cmd;
1360 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1363 memset(&create_cmd, 0x0, sizeof(create_cmd));
1365 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1367 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1368 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1369 create_cmd.cq_caps_1 |=
1370 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1372 create_cmd.msix_vector = io_cq->msix_vector;
1373 create_cmd.cq_depth = io_cq->q_depth;
1375 ret = ena_com_mem_addr_set(ena_dev,
1377 io_cq->cdesc_addr.phys_addr);
1378 if (unlikely(ret)) {
1379 pr_err("memory address set failed\n");
1383 ret = ena_com_execute_admin_command(admin_queue,
1384 (struct ena_admin_aq_entry *)&create_cmd,
1386 (struct ena_admin_acq_entry *)&cmd_completion,
1387 sizeof(cmd_completion));
1388 if (unlikely(ret)) {
1389 pr_err("Failed to create IO CQ. error: %d\n", ret);
1393 io_cq->idx = cmd_completion.cq_idx;
1395 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1396 cmd_completion.cq_interrupt_unmask_register_offset);
1398 if (cmd_completion.cq_head_db_register_offset)
1399 io_cq->cq_head_db_reg =
1400 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1401 cmd_completion.cq_head_db_register_offset);
1403 if (cmd_completion.numa_node_register_offset)
1404 io_cq->numa_node_cfg_reg =
1405 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1406 cmd_completion.numa_node_register_offset);
1408 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1413 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1414 struct ena_com_io_sq **io_sq,
1415 struct ena_com_io_cq **io_cq)
1417 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1418 pr_err("Invalid queue number %d but the max is %d\n", qid,
1419 ENA_TOTAL_NUM_QUEUES);
1423 *io_sq = &ena_dev->io_sq_queues[qid];
1424 *io_cq = &ena_dev->io_cq_queues[qid];
1429 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1431 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1432 struct ena_comp_ctx *comp_ctx;
1435 if (!admin_queue->comp_ctx)
1438 for (i = 0; i < admin_queue->q_depth; i++) {
1439 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1440 if (unlikely(!comp_ctx))
1443 comp_ctx->status = ENA_CMD_ABORTED;
1445 complete(&comp_ctx->wait_event);
1449 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1451 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1452 unsigned long flags = 0;
1454 spin_lock_irqsave(&admin_queue->q_lock, flags);
1455 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1456 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1457 msleep(ENA_POLL_MS);
1458 spin_lock_irqsave(&admin_queue->q_lock, flags);
1460 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1463 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1464 struct ena_com_io_cq *io_cq)
1466 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1467 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1468 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1471 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1473 destroy_cmd.cq_idx = io_cq->idx;
1474 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1476 ret = ena_com_execute_admin_command(admin_queue,
1477 (struct ena_admin_aq_entry *)&destroy_cmd,
1478 sizeof(destroy_cmd),
1479 (struct ena_admin_acq_entry *)&destroy_resp,
1480 sizeof(destroy_resp));
1482 if (unlikely(ret && (ret != -ENODEV)))
1483 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1488 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1490 return ena_dev->admin_queue.running_state;
1493 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1495 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1496 unsigned long flags = 0;
1498 spin_lock_irqsave(&admin_queue->q_lock, flags);
1499 ena_dev->admin_queue.running_state = state;
1500 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1503 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1505 u16 depth = ena_dev->aenq.q_depth;
1507 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1509 /* Init head_db to mark that all entries in the queue
1510 * are initially available
1512 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1515 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1517 struct ena_com_admin_queue *admin_queue;
1518 struct ena_admin_set_feat_cmd cmd;
1519 struct ena_admin_set_feat_resp resp;
1520 struct ena_admin_get_feat_resp get_resp;
1523 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1525 pr_info("Can't get aenq configuration\n");
1529 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1530 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1531 get_resp.u.aenq.supported_groups, groups_flag);
1535 memset(&cmd, 0x0, sizeof(cmd));
1536 admin_queue = &ena_dev->admin_queue;
1538 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1539 cmd.aq_common_descriptor.flags = 0;
1540 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1541 cmd.u.aenq.enabled_groups = groups_flag;
1543 ret = ena_com_execute_admin_command(admin_queue,
1544 (struct ena_admin_aq_entry *)&cmd,
1546 (struct ena_admin_acq_entry *)&resp,
1550 pr_err("Failed to config AENQ ret: %d\n", ret);
1555 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1557 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1560 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1561 pr_err("Reg read timeout occurred\n");
1565 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1566 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1568 pr_debug("ENA dma width: %d\n", width);
1570 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1571 pr_err("DMA width illegal value: %d\n", width);
1575 ena_dev->dma_addr_bits = width;
1580 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1584 u32 ctrl_ver_masked;
	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
1589 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1590 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1591 ENA_REGS_CONTROLLER_VERSION_OFF);
1593 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1594 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1595 pr_err("Reg read timeout occurred\n");
1599 pr_info("ena device version: %d.%d\n",
1600 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1601 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1602 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1604 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1605 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1606 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1607 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1608 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1609 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1610 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1611 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1614 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1615 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1616 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1618 /* Validate the ctrl version without the implementation ID */
1619 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1620 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1627 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1629 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1630 struct ena_com_admin_cq *cq = &admin_queue->cq;
1631 struct ena_com_admin_sq *sq = &admin_queue->sq;
1632 struct ena_com_aenq *aenq = &ena_dev->aenq;
1635 if (admin_queue->comp_ctx)
1636 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1637 admin_queue->comp_ctx = NULL;
1638 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1640 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1644 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1646 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1650 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1651 if (ena_dev->aenq.entries)
1652 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1654 aenq->entries = NULL;
1657 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1662 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1664 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1665 ena_dev->admin_queue.polling = polling;
1668 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1671 ena_dev->admin_queue.auto_polling = polling;
1674 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1676 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1678 spin_lock_init(&mmio_read->lock);
1679 mmio_read->read_resp =
1680 dma_alloc_coherent(ena_dev->dmadev,
1681 sizeof(*mmio_read->read_resp),
1682 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1683 if (unlikely(!mmio_read->read_resp))
1686 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1688 mmio_read->read_resp->req_id = 0x0;
1689 mmio_read->seq_num = 0x0;
1690 mmio_read->readless_supported = true;
1699 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1701 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1703 mmio_read->readless_supported = readless_supported;
1706 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1708 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1710 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1711 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1713 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1714 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1716 mmio_read->read_resp = NULL;
1719 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1721 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1722 u32 addr_low, addr_high;
1724 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1725 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1727 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1728 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1731 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1732 struct ena_aenq_handlers *aenq_handlers)
1734 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1735 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1738 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1740 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1741 pr_err("Reg read timeout occurred\n");
1745 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1746 pr_err("Device isn't ready, abort com init\n");
1750 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1752 admin_queue->q_dmadev = ena_dev->dmadev;
1753 admin_queue->polling = false;
1754 admin_queue->curr_cmd_id = 0;
1756 atomic_set(&admin_queue->outstanding_cmds, 0);
1758 spin_lock_init(&admin_queue->q_lock);
1760 ret = ena_com_init_comp_ctxt(admin_queue);
1764 ret = ena_com_admin_init_sq(admin_queue);
1768 ret = ena_com_admin_init_cq(admin_queue);
1772 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1773 ENA_REGS_AQ_DB_OFF);
1775 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1776 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1778 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1779 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1781 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1782 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1784 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1785 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1788 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1789 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1790 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1791 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1794 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1795 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1796 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1797 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1799 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1800 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1801 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1805 admin_queue->running_state = true;
1809 ena_com_admin_destroy(ena_dev);
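/* Bring-up order sketch (informal): ena_com_admin_init() allocates the
 * completion contexts, then the admin SQ and CQ, programs their base
 * addresses and caps registers, and finally initializes the AENQ; on any
 * failure it unwinds through ena_com_admin_destroy(). A typical probe path
 * would call ena_com_mmio_reg_read_request_init() before this function so
 * that the register reads above can use the readless mechanism.
 */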
1814 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1815 struct ena_com_create_io_ctx *ctx)
1817 struct ena_com_io_sq *io_sq;
1818 struct ena_com_io_cq *io_cq;
1821 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1822 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1823 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1827 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1828 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1830 memset(io_sq, 0x0, sizeof(*io_sq));
1831 memset(io_cq, 0x0, sizeof(*io_cq));
1834 io_cq->q_depth = ctx->queue_size;
1835 io_cq->direction = ctx->direction;
1836 io_cq->qid = ctx->qid;
1838 io_cq->msix_vector = ctx->msix_vector;
1840 io_sq->q_depth = ctx->queue_size;
1841 io_sq->direction = ctx->direction;
1842 io_sq->qid = ctx->qid;
1844 io_sq->mem_queue_type = ctx->mem_queue_type;
1846 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1847 /* header length is limited to 8 bits */
1848 io_sq->tx_max_header_size =
1849 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1851 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1854 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1858 ret = ena_com_create_io_cq(ena_dev, io_cq);
1862 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1869 ena_com_destroy_io_cq(ena_dev, io_cq);
1871 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
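/* Creation order sketch (informal): ena_com_create_io_queue() first allocates
 * the SQ and CQ host memory (ena_com_init_io_sq/cq), then creates the CQ on
 * the device so that its index can be passed to the create-SQ admin command;
 * the error paths above destroy the CQ and free the host memory in reverse
 * order.
 */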
1875 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1877 struct ena_com_io_sq *io_sq;
1878 struct ena_com_io_cq *io_cq;
1880 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1881 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1882 ENA_TOTAL_NUM_QUEUES);
1886 io_sq = &ena_dev->io_sq_queues[qid];
1887 io_cq = &ena_dev->io_cq_queues[qid];
1889 ena_com_destroy_io_sq(ena_dev, io_sq);
1890 ena_com_destroy_io_cq(ena_dev, io_cq);
1892 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1895 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1896 struct ena_admin_get_feat_resp *resp)
1898 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1901 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1902 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1904 struct ena_admin_get_feat_resp get_resp;
1907 rc = ena_com_get_feature(ena_dev, &get_resp,
1908 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1912 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1913 sizeof(get_resp.u.dev_attr));
1914 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1916 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1917 rc = ena_com_get_feature(ena_dev, &get_resp,
1918 ENA_ADMIN_MAX_QUEUES_EXT,
1919 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1923 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1926 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1927 sizeof(get_resp.u.max_queue_ext));
1928 ena_dev->tx_max_header_size =
1929 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1931 rc = ena_com_get_feature(ena_dev, &get_resp,
1932 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1933 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1934 sizeof(get_resp.u.max_queue));
1935 ena_dev->tx_max_header_size =
1936 get_resp.u.max_queue.max_header_size;
1942 rc = ena_com_get_feature(ena_dev, &get_resp,
1943 ENA_ADMIN_AENQ_CONFIG, 0);
1947 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1948 sizeof(get_resp.u.aenq));
1950 rc = ena_com_get_feature(ena_dev, &get_resp,
1951 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1955 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1956 sizeof(get_resp.u.offload));
	/* Driver hints isn't a mandatory admin command, so if the
	 * command isn't supported, set driver hints to 0
1961 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1964 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1965 sizeof(get_resp.u.hw_hints));
1966 else if (rc == -EOPNOTSUPP)
1967 memset(&get_feat_ctx->hw_hints, 0x0,
1968 sizeof(get_feat_ctx->hw_hints));
1972 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1974 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1975 sizeof(get_resp.u.llq));
1976 else if (rc == -EOPNOTSUPP)
1977 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1984 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1986 ena_com_handle_admin_completion(&ena_dev->admin_queue);
/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
1992 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1995 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1997 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1998 return aenq_handlers->handlers[group];
2000 return aenq_handlers->unimplemented_handler;
/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events.
 * pop events from the queue and apply the specific handler
2007 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2009 struct ena_admin_aenq_entry *aenq_e;
2010 struct ena_admin_aenq_common_desc *aenq_common;
2011 struct ena_com_aenq *aenq = &dev->aenq;
2012 unsigned long long timestamp;
2013 ena_aenq_handler handler_cb;
2014 u16 masked_head, processed = 0;
2017 masked_head = aenq->head & (aenq->q_depth - 1);
2018 phase = aenq->phase;
2019 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2020 aenq_common = &aenq_e->aenq_common_desc;
2022 /* Go over all the events */
2023 while ((READ_ONCE(aenq_common->flags) &
2024 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2025 /* Make sure the phase bit (ownership) is as expected before
2026 * reading the rest of the descriptor.
2031 (unsigned long long)aenq_common->timestamp_low |
2032 ((unsigned long long)aenq_common->timestamp_high << 32);
2033 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
2034 aenq_common->group, aenq_common->syndrom, timestamp);
2036 /* Handle specific event*/
2037 handler_cb = ena_com_get_specific_aenq_cb(dev,
2038 aenq_common->group);
2039 handler_cb(data, aenq_e); /* call the actual event handler*/
2041 /* Get next event entry */
2045 if (unlikely(masked_head == aenq->q_depth)) {
2049 aenq_e = &aenq->entries[masked_head];
2050 aenq_common = &aenq_e->aenq_common_desc;
2053 aenq->head += processed;
2054 aenq->phase = phase;
2056 /* Don't update aenq doorbell if there weren't any processed events */
2060 /* write the aenq doorbell after all AENQ descriptors were read */
2062 writel_relaxed((u32)aenq->head,
2063 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
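/* Wiring sketch (illustrative; handler and group names below are assumed,
 * the concrete table lives in the netdev driver): the callbacks dispatched
 * above come from the ena_aenq_handlers struct registered at
 * ena_com_admin_init() time, roughly:
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = link_change_handler,
 *			[ENA_ADMIN_KEEP_ALIVE]  = keep_alive_handler,
 *		},
 *		.unimplemented_handler = unimplemented_aenq_handler,
 *	};
 */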
2066 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2067 enum ena_regs_reset_reason_types reset_reason)
2069 u32 stat, timeout, cap, reset_val;
2072 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2073 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2075 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2076 (cap == ENA_MMIO_READ_TIMEOUT))) {
2077 pr_err("Reg read32 timeout occurred\n");
2081 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2082 pr_err("Device isn't ready, can't reset device\n");
2086 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2087 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2089 pr_err("Invalid timeout value\n");
2094 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2095 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2096 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2097 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2099 /* Write again the MMIO read request address */
2100 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2102 rc = wait_for_reset_state(ena_dev, timeout,
2103 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2105 pr_err("Reset indication didn't turn on\n");
2110 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2111 rc = wait_for_reset_state(ena_dev, timeout, 0);
2113 pr_err("Reset indication didn't turn off\n");
2117 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2118 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2120 /* The resolution of the timeout register is 100 ms */
2121 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2123 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
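/*
 * Worked example (values assumed, for illustration only): if the CAPS
 * register reports an admin command timeout field of 50, the admin
 * completion timeout becomes 50 * 100000 = 5,000,000 us (5 s); a field of
 * zero falls back to the ADMIN_CMD_TIMEOUT_US default.
 */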
2128 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2129 struct ena_com_stats_ctx *ctx,
2130 enum ena_admin_get_stats_type type)
2132 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2133 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2134 struct ena_com_admin_queue *admin_queue;
2137 admin_queue = &ena_dev->admin_queue;
2139 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2140 get_cmd->aq_common_descriptor.flags = 0;
2141 get_cmd->type = type;
2143 ret = ena_com_execute_admin_command(admin_queue,
2144 (struct ena_admin_aq_entry *)get_cmd,
2146 (struct ena_admin_acq_entry *)get_resp,
2150 pr_err("Failed to get stats. error: %d\n", ret);
2155 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2156 struct ena_admin_basic_stats *stats)
2158 struct ena_com_stats_ctx ctx;
2161 memset(&ctx, 0x0, sizeof(ctx));
2162 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2163 if (likely(ret == 0))
2164 memcpy(stats, &ctx.get_resp.basic_stats,
2165 sizeof(ctx.get_resp.basic_stats));
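/*
 * Illustrative usage sketch (not part of this file): reading the basic
 * statistics and recombining a 64-bit counter from its 32-bit halves.
 * The rx_drops_low/rx_drops_high field names are assumptions for the
 * example.
 */
#if 0 /* example only */
static u64 example_read_rx_drops(struct ena_com_dev *ena_dev)
{
	struct ena_admin_basic_stats stats;

	if (ena_com_get_dev_basic_stats(ena_dev, &stats))
		return 0;

	return ((u64)stats.rx_drops_high << 32) | stats.rx_drops_low;
}
#endif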
2170 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2172 struct ena_com_admin_queue *admin_queue;
2173 struct ena_admin_set_feat_cmd cmd;
2174 struct ena_admin_set_feat_resp resp;
2177 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2178 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2182 memset(&cmd, 0x0, sizeof(cmd));
2183 admin_queue = &ena_dev->admin_queue;
2185 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2186 cmd.aq_common_descriptor.flags = 0;
2187 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2188 cmd.u.mtu.mtu = mtu;
2190 ret = ena_com_execute_admin_command(admin_queue,
2191 (struct ena_admin_aq_entry *)&cmd,
2193 (struct ena_admin_acq_entry *)&resp,
2197 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2202 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2203 struct ena_admin_feature_offload_desc *offload)
2206 struct ena_admin_get_feat_resp resp;
2208 ret = ena_com_get_feature(ena_dev, &resp,
2209 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2210 if (unlikely(ret)) {
2211 pr_err("Failed to get offload capabilities %d\n", ret);
2215 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2220 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2222 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2223 struct ena_rss *rss = &ena_dev->rss;
2224 struct ena_admin_set_feat_cmd cmd;
2225 struct ena_admin_set_feat_resp resp;
2226 struct ena_admin_get_feat_resp get_resp;
2229 if (!ena_com_check_supported_feature_id(ena_dev,
2230 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2231 pr_debug("Feature %d isn't supported\n",
2232 ENA_ADMIN_RSS_HASH_FUNCTION);
2236 /* Validate hash function is supported */
2237 ret = ena_com_get_feature(ena_dev, &get_resp,
2238 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2242 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2243 pr_err("Func hash %d isn't supported by device, abort\n",
2248 memset(&cmd, 0x0, sizeof(cmd));
2250 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2251 cmd.aq_common_descriptor.flags =
2252 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2253 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2254 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2255 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2257 ret = ena_com_mem_addr_set(ena_dev,
2258 &cmd.control_buffer.address,
2259 rss->hash_key_dma_addr);
2260 if (unlikely(ret)) {
2261 pr_err("memory address set failed\n");
2265 cmd.control_buffer.length = sizeof(*rss->hash_key);
2267 ret = ena_com_execute_admin_command(admin_queue,
2268 (struct ena_admin_aq_entry *)&cmd,
2270 (struct ena_admin_acq_entry *)&resp,
2272 if (unlikely(ret)) {
2273 pr_err("Failed to set hash function %d. error: %d\n",
2274 rss->hash_func, ret);
2281 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2282 enum ena_admin_hash_functions func,
2283 const u8 *key, u16 key_len, u32 init_val)
2285 struct ena_rss *rss = &ena_dev->rss;
2286 struct ena_admin_get_feat_resp get_resp;
2287 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2291 /* Make sure the key size is a multiple of DWORDs */
2292 if (unlikely(key_len & 0x3))
2295 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2296 ENA_ADMIN_RSS_HASH_FUNCTION,
2297 rss->hash_key_dma_addr,
2298 sizeof(*rss->hash_key), 0);
2302 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2303 pr_err("Flow hash function %d isn't supported\n", func);
2308 case ENA_ADMIN_TOEPLITZ:
2310 if (key_len != sizeof(hash_key->key)) {
2311 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2312 key_len, sizeof(hash_key->key));
2315 memcpy(hash_key->key, key, key_len);
2316 rss->hash_init_val = init_val;
2317 hash_key->keys_num = key_len >> 2;
2320 case ENA_ADMIN_CRC32:
2321 rss->hash_init_val = init_val;
2324 pr_err("Invalid hash function (%d)\n", func);
2328 rss->hash_func = func;
2329 rc = ena_com_set_hash_function(ena_dev);
2331 /* Restore the old function */
2333 ena_com_get_hash_function(ena_dev, NULL, NULL);
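/*
 * Illustrative usage sketch (not part of this file): programming a Toeplitz
 * hash key. The 40-byte key length and the rss_key source are assumptions
 * for the example; the length must match the device key size and be a
 * multiple of 4 bytes.
 */
#if 0 /* example only */
static int example_set_toeplitz_key(struct ena_com_dev *ena_dev,
				    const u8 *rss_key)
{
	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  rss_key, 40, 0);
}
#endif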
2338 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2339 enum ena_admin_hash_functions *func,
2342 struct ena_rss *rss = &ena_dev->rss;
2343 struct ena_admin_get_feat_resp get_resp;
2344 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2348 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2349 ENA_ADMIN_RSS_HASH_FUNCTION,
2350 rss->hash_key_dma_addr,
2351 sizeof(*rss->hash_key), 0);
2355 /* ffs() returns 1 when the LSB is set */
2356 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2361 *func = rss->hash_func;
2364 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2369 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2370 enum ena_admin_flow_hash_proto proto,
2373 struct ena_rss *rss = &ena_dev->rss;
2374 struct ena_admin_get_feat_resp get_resp;
2377 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2378 ENA_ADMIN_RSS_HASH_INPUT,
2379 rss->hash_ctrl_dma_addr,
2380 sizeof(*rss->hash_ctrl), 0);
2385 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2390 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2393 struct ena_rss *rss = &ena_dev->rss;
2394 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2395 struct ena_admin_set_feat_cmd cmd;
2396 struct ena_admin_set_feat_resp resp;
2399 if (!ena_com_check_supported_feature_id(ena_dev,
2400 ENA_ADMIN_RSS_HASH_INPUT)) {
2401 pr_debug("Feature %d isn't supported\n",
2402 ENA_ADMIN_RSS_HASH_INPUT);
2406 memset(&cmd, 0x0, sizeof(cmd));
2408 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2409 cmd.aq_common_descriptor.flags =
2410 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2411 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2412 cmd.u.flow_hash_input.enabled_input_sort =
2413 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2414 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2416 ret = ena_com_mem_addr_set(ena_dev,
2417 &cmd.control_buffer.address,
2418 rss->hash_ctrl_dma_addr);
2419 if (unlikely(ret)) {
2420 pr_err("memory address set failed\n");
2423 cmd.control_buffer.length = sizeof(*hash_ctrl);
2425 ret = ena_com_execute_admin_command(admin_queue,
2426 (struct ena_admin_aq_entry *)&cmd,
2428 (struct ena_admin_acq_entry *)&resp,
2431 pr_err("Failed to set hash input. error: %d\n", ret);
2436 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2438 struct ena_rss *rss = &ena_dev->rss;
2439 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2441 u16 available_fields = 0;
2444 /* Get the supported hash input */
2445 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2449 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2450 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2451 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2453 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2454 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2455 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2457 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2458 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2459 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2461 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2462 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2463 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2465 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2466 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2468 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2469 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2471 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2472 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2474 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2475 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2477 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2478 available_fields = hash_ctrl->selected_fields[i].fields &
2479 hash_ctrl->supported_fields[i].fields;
2480 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2481 pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2482 i, hash_ctrl->supported_fields[i].fields,
2483 hash_ctrl->selected_fields[i].fields);
2488 rc = ena_com_set_hash_ctrl(ena_dev);
2490 /* In case of failure, restore the old hash ctrl */
2492 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2497 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2498 enum ena_admin_flow_hash_proto proto,
2501 struct ena_rss *rss = &ena_dev->rss;
2502 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2503 u16 supported_fields;
2506 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2507 pr_err("Invalid proto num (%u)\n", proto);
2511 /* Get the ctrl table */
2512 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2516 /* Make sure all the fields are supported */
2517 supported_fields = hash_ctrl->supported_fields[proto].fields;
2518 if ((hash_fields & supported_fields) != hash_fields) {
2519 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2520 proto, hash_fields, supported_fields);
2523 hash_ctrl->selected_fields[proto].fields = hash_fields;
2525 rc = ena_com_set_hash_ctrl(ena_dev);
2527 /* In case of failure, restore the old hash ctrl */
2529 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
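/*
 * Illustrative usage sketch (not part of this file): restricting the
 * TCP/IPv4 hash input to the 4-tuple (addresses and ports), using the field
 * masks defined for this interface.
 */
#if 0 /* example only */
static int example_tcp4_four_tuple(struct ena_com_dev *ena_dev)
{
	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
				      ENA_ADMIN_RSS_L3_SA |
				      ENA_ADMIN_RSS_L3_DA |
				      ENA_ADMIN_RSS_L4_SP |
				      ENA_ADMIN_RSS_L4_DP);
}
#endif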
2534 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2535 u16 entry_idx, u16 entry_value)
2537 struct ena_rss *rss = &ena_dev->rss;
2539 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2542 if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2545 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2550 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2552 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2553 struct ena_rss *rss = &ena_dev->rss;
2554 struct ena_admin_set_feat_cmd cmd;
2555 struct ena_admin_set_feat_resp resp;
2558 if (!ena_com_check_supported_feature_id(
2559 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2560 pr_debug("Feature %d isn't supported\n",
2561 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2565 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2567 pr_err("Failed to convert host indirection table to device table\n");
2571 memset(&cmd, 0x0, sizeof(cmd));
2573 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2574 cmd.aq_common_descriptor.flags =
2575 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2576 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2577 cmd.u.ind_table.size = rss->tbl_log_size;
2578 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2580 ret = ena_com_mem_addr_set(ena_dev,
2581 &cmd.control_buffer.address,
2582 rss->rss_ind_tbl_dma_addr);
2583 if (unlikely(ret)) {
2584 pr_err("memory address set failed\n");
2588 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2589 sizeof(struct ena_admin_rss_ind_table_entry);
2591 ret = ena_com_execute_admin_command(admin_queue,
2592 (struct ena_admin_aq_entry *)&cmd,
2594 (struct ena_admin_acq_entry *)&resp,
2598 pr_err("Failed to set indirect table. error: %d\n", ret);
2603 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2605 struct ena_rss *rss = &ena_dev->rss;
2606 struct ena_admin_get_feat_resp get_resp;
2610 tbl_size = (1ULL << rss->tbl_log_size) *
2611 sizeof(struct ena_admin_rss_ind_table_entry);
2613 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2614 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2615 rss->rss_ind_tbl_dma_addr,
2623 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2624 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2629 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2633 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2635 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2639 /* The following function might return -EOPNOTSUPP if the device
2640 * doesn't support setting the key / hash function. We can safely
2641 * ignore this error and fall back to indirection table support only.
2643 rc = ena_com_hash_key_allocate(ena_dev);
2644 if (unlikely(rc) && rc != -EOPNOTSUPP)
2646 else if (rc != -EOPNOTSUPP)
2647 ena_com_hash_key_fill_default_key(ena_dev);
2649 rc = ena_com_hash_ctrl_init(ena_dev);
2656 ena_com_hash_key_destroy(ena_dev);
2658 ena_com_indirect_table_destroy(ena_dev);
2664 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2666 ena_com_indirect_table_destroy(ena_dev);
2667 ena_com_hash_key_destroy(ena_dev);
2668 ena_com_hash_ctrl_destroy(ena_dev);
2670 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2673 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2675 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2677 host_attr->host_info =
2678 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2679 &host_attr->host_info_dma_addr, GFP_KERNEL);
2680 if (unlikely(!host_attr->host_info))
2683 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2684 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2685 (ENA_COMMON_SPEC_VERSION_MINOR));
2690 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2691 u32 debug_area_size)
2693 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2695 host_attr->debug_area_virt_addr =
2696 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2697 &host_attr->debug_area_dma_addr,
2699 if (unlikely(!host_attr->debug_area_virt_addr)) {
2700 host_attr->debug_area_size = 0;
2704 host_attr->debug_area_size = debug_area_size;
2709 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2711 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2713 if (host_attr->host_info) {
2714 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2715 host_attr->host_info_dma_addr);
2716 host_attr->host_info = NULL;
2720 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2722 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2724 if (host_attr->debug_area_virt_addr) {
2725 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2726 host_attr->debug_area_virt_addr,
2727 host_attr->debug_area_dma_addr);
2728 host_attr->debug_area_virt_addr = NULL;
2732 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2734 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2735 struct ena_com_admin_queue *admin_queue;
2736 struct ena_admin_set_feat_cmd cmd;
2737 struct ena_admin_set_feat_resp resp;
2741 /* Host attribute config is called before ena_com_get_dev_attr_feat
2742 * so ena_com can't check if the feature is supported.
2745 memset(&cmd, 0x0, sizeof(cmd));
2746 admin_queue = &ena_dev->admin_queue;
2748 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2749 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2751 ret = ena_com_mem_addr_set(ena_dev,
2752 &cmd.u.host_attr.debug_ba,
2753 host_attr->debug_area_dma_addr);
2754 if (unlikely(ret)) {
2755 pr_err("memory address set failed\n");
2759 ret = ena_com_mem_addr_set(ena_dev,
2760 &cmd.u.host_attr.os_info_ba,
2761 host_attr->host_info_dma_addr);
2762 if (unlikely(ret)) {
2763 pr_err("memory address set failed\n");
2767 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2769 ret = ena_com_execute_admin_command(admin_queue,
2770 (struct ena_admin_aq_entry *)&cmd,
2772 (struct ena_admin_acq_entry *)&resp,
2776 pr_err("Failed to set host attributes: %d\n", ret);
2781 /* Interrupt moderation */
2782 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2784 return ena_com_check_supported_feature_id(ena_dev,
2785 ENA_ADMIN_INTERRUPT_MODERATION);
2788 static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2789 u32 intr_delay_resolution,
2790 u32 *intr_moder_interval)
2792 if (!intr_delay_resolution) {
2793 pr_err("Illegal interrupt delay granularity value\n");
2797 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2802 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2803 u32 tx_coalesce_usecs)
2805 return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2806 ena_dev->intr_delay_resolution,
2807 &ena_dev->intr_moder_tx_interval);
2810 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2811 u32 rx_coalesce_usecs)
2813 return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2814 ena_dev->intr_delay_resolution,
2815 &ena_dev->intr_moder_rx_interval);
2818 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2820 struct ena_admin_get_feat_resp get_resp;
2821 u16 delay_resolution;
2824 rc = ena_com_get_feature(ena_dev, &get_resp,
2825 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2828 if (rc == -EOPNOTSUPP) {
2829 pr_debug("Feature %d isn't supported\n",
2830 ENA_ADMIN_INTERRUPT_MODERATION);
2833 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2837 /* No moderation is supported; disable adaptive moderation */
2838 ena_com_disable_adaptive_moderation(ena_dev);
2842 /* Moderation is supported by the device; set the interrupt delay resolution */
2843 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2844 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2846 /* Disable adaptive moderation by default - can be enabled later */
2847 ena_com_disable_adaptive_moderation(ena_dev);
2852 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2854 return ena_dev->intr_moder_tx_interval;
2857 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2859 return ena_dev->intr_moder_rx_interval;
2862 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2863 struct ena_admin_feature_llq_desc *llq_features,
2864 struct ena_llq_configurations *llq_default_cfg)
2866 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2869 if (!llq_features->max_llq_num) {
2870 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2874 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2878 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2879 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2881 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2882 pr_err("the size of the LLQ entry is smaller than needed\n");
2886 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;