1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
8 /*****************************************************************************/
9 /*****************************************************************************/
/* Timeout in microseconds */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
18 #define ENA_CTRL_MAJOR 0
19 #define ENA_CTRL_MINOR 0
20 #define ENA_CTRL_SUB_MINOR 1
22 #define MIN_ENA_CTRL_VER \
23 (((ENA_CTRL_MAJOR) << \
24 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25 ((ENA_CTRL_MINOR) << \
26 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
29 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
30 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
32 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
34 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
36 #define ENA_REGS_ADMIN_INTR_MASK 1
38 #define ENA_MAX_BACKOFF_DELAY_EXP 16U
40 #define ENA_MIN_ADMIN_POLL_US 100
42 #define ENA_MAX_ADMIN_POLL_US 5000
44 /*****************************************************************************/
45 /*****************************************************************************/
46 /*****************************************************************************/
51 /* Abort - canceled by the driver */
56 struct completion wait_event;
57 struct ena_admin_acq_entry *user_cqe;
59 enum ena_cmd_status status;
60 /* status from the device */
66 struct ena_com_stats_ctx {
67 struct ena_admin_aq_get_stats_cmd get_cmd;
68 struct ena_admin_acq_get_stats_resp get_resp;
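/* Program a DMA address into the device's common memory address descriptor.
 * The address must fit within the DMA width advertised by the device; the
 * high part is truncated to 16 bits, since the common descriptor carries at
 * most a 48-bit address.
 */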
71 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
72 struct ena_common_mem_addr *ena_addr,
75 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
76 netdev_err(ena_dev->net_device,
77 "DMA address has more bits that the device supports\n");
81 ena_addr->mem_addr_low = lower_32_bits(addr);
82 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
87 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
89 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
90 struct ena_com_admin_sq *sq = &admin_queue->sq;
91 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
93 sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
94 &sq->dma_addr, GFP_KERNEL);
97 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
110 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
112 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
113 struct ena_com_admin_cq *cq = &admin_queue->cq;
114 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
116 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
117 &cq->dma_addr, GFP_KERNEL);
120 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
130 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
131 struct ena_aenq_handlers *aenq_handlers)
133 struct ena_com_aenq *aenq = &ena_dev->aenq;
134 u32 addr_low, addr_high, aenq_caps;
137 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
138 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
139 aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
140 &aenq->dma_addr, GFP_KERNEL);
142 if (!aenq->entries) {
143 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
147 aenq->head = aenq->q_depth;
150 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
151 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
153 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
154 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
157 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
158 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
159 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
160 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
161 writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
163 if (unlikely(!aenq_handlers)) {
164 netdev_err(ena_dev->net_device,
165 "AENQ handlers pointer is NULL\n");
169 aenq->aenq_handlers = aenq_handlers;
174 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
175 struct ena_comp_ctx *comp_ctx)
177 comp_ctx->occupied = false;
178 atomic_dec(&queue->outstanding_cmds);
181 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
182 u16 command_id, bool capture)
184 if (unlikely(command_id >= admin_queue->q_depth)) {
185 netdev_err(admin_queue->ena_dev->net_device,
186 "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
187 command_id, admin_queue->q_depth);
191 if (unlikely(!admin_queue->comp_ctx)) {
192 netdev_err(admin_queue->ena_dev->net_device,
193 "Completion context is NULL\n");
197 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
198 netdev_err(admin_queue->ena_dev->net_device,
199 "Completion context is occupied\n");
204 atomic_inc(&admin_queue->outstanding_cmds);
205 admin_queue->comp_ctx[command_id].occupied = true;
208 return &admin_queue->comp_ctx[command_id];
211 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
212 struct ena_admin_aq_entry *cmd,
213 size_t cmd_size_in_bytes,
214 struct ena_admin_acq_entry *comp,
215 size_t comp_size_in_bytes)
217 struct ena_comp_ctx *comp_ctx;
218 u16 tail_masked, cmd_id;
222 queue_size_mask = admin_queue->q_depth - 1;
224 tail_masked = admin_queue->sq.tail & queue_size_mask;
/* Bail out if the admin queue is full */
227 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
228 if (cnt >= admin_queue->q_depth) {
229 netdev_dbg(admin_queue->ena_dev->net_device,
230 "Admin queue is full.\n");
231 admin_queue->stats.out_of_space++;
232 return ERR_PTR(-ENOSPC);
235 cmd_id = admin_queue->curr_cmd_id;
237 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
238 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
240 cmd->aq_common_descriptor.command_id |= cmd_id &
241 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
243 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
244 if (unlikely(!comp_ctx))
245 return ERR_PTR(-EINVAL);
247 comp_ctx->status = ENA_CMD_SUBMITTED;
248 comp_ctx->comp_size = (u32)comp_size_in_bytes;
249 comp_ctx->user_cqe = comp;
250 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
252 reinit_completion(&comp_ctx->wait_event);
254 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
256 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
259 admin_queue->sq.tail++;
260 admin_queue->stats.submitted_cmd++;
262 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
263 admin_queue->sq.phase = !admin_queue->sq.phase;
265 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
270 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
272 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
273 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
274 struct ena_comp_ctx *comp_ctx;
277 admin_queue->comp_ctx =
278 devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
279 if (unlikely(!admin_queue->comp_ctx)) {
280 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
284 for (i = 0; i < admin_queue->q_depth; i++) {
285 comp_ctx = get_comp_ctxt(admin_queue, i, false);
287 init_completion(&comp_ctx->wait_event);
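/* Locked wrapper around __ena_com_submit_admin_cmd(): refuses to submit when
 * the admin queue isn't running, and marks the queue as not running if the
 * submission itself fails.
 */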
293 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
294 struct ena_admin_aq_entry *cmd,
295 size_t cmd_size_in_bytes,
296 struct ena_admin_acq_entry *comp,
297 size_t comp_size_in_bytes)
299 unsigned long flags = 0;
300 struct ena_comp_ctx *comp_ctx;
302 spin_lock_irqsave(&admin_queue->q_lock, flags);
303 if (unlikely(!admin_queue->running_state)) {
304 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
305 return ERR_PTR(-ENODEV);
307 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
311 if (IS_ERR(comp_ctx))
312 admin_queue->running_state = false;
313 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
318 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
319 struct ena_com_create_io_ctx *ctx,
320 struct ena_com_io_sq *io_sq)
325 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
327 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
328 io_sq->desc_entry_size =
329 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
330 sizeof(struct ena_eth_io_tx_desc) :
331 sizeof(struct ena_eth_io_rx_desc);
333 size = io_sq->desc_entry_size * io_sq->q_depth;
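/* For host-memory queues, prefer allocating the descriptor ring on the
 * queue's NUMA node; if that fails, retry the allocation on the default
 * device node.
 */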
335 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
336 dev_node = dev_to_node(ena_dev->dmadev);
337 set_dev_node(ena_dev->dmadev, ctx->numa_node);
338 io_sq->desc_addr.virt_addr =
339 dma_alloc_coherent(ena_dev->dmadev, size,
340 &io_sq->desc_addr.phys_addr,
342 set_dev_node(ena_dev->dmadev, dev_node);
343 if (!io_sq->desc_addr.virt_addr) {
344 io_sq->desc_addr.virt_addr =
345 dma_alloc_coherent(ena_dev->dmadev, size,
346 &io_sq->desc_addr.phys_addr,
350 if (!io_sq->desc_addr.virt_addr) {
351 netdev_err(ena_dev->net_device,
352 "Memory allocation failed\n");
357 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
358 /* Allocate bounce buffers */
359 io_sq->bounce_buf_ctrl.buffer_size =
360 ena_dev->llq_info.desc_list_entry_size;
361 io_sq->bounce_buf_ctrl.buffers_num =
362 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
363 io_sq->bounce_buf_ctrl.next_to_use = 0;
365 size = io_sq->bounce_buf_ctrl.buffer_size *
366 io_sq->bounce_buf_ctrl.buffers_num;
368 dev_node = dev_to_node(ena_dev->dmadev);
369 set_dev_node(ena_dev->dmadev, ctx->numa_node);
370 io_sq->bounce_buf_ctrl.base_buffer =
371 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
372 set_dev_node(ena_dev->dmadev, dev_node);
373 if (!io_sq->bounce_buf_ctrl.base_buffer)
374 io_sq->bounce_buf_ctrl.base_buffer =
375 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
377 if (!io_sq->bounce_buf_ctrl.base_buffer) {
378 netdev_err(ena_dev->net_device,
379 "Bounce buffer memory allocation failed\n");
383 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
384 sizeof(io_sq->llq_info));
/* Initialize the first bounce buffer */
387 io_sq->llq_buf_ctrl.curr_bounce_buf =
388 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
389 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
390 0x0, io_sq->llq_info.desc_list_entry_size);
391 io_sq->llq_buf_ctrl.descs_left_in_line =
392 io_sq->llq_info.descs_num_before_header;
393 io_sq->disable_meta_caching =
394 io_sq->llq_info.disable_meta_caching;
396 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
397 io_sq->entries_in_tx_burst_left =
398 io_sq->llq_info.max_entries_in_tx_burst;
402 io_sq->next_to_comp = 0;
408 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
409 struct ena_com_create_io_ctx *ctx,
410 struct ena_com_io_cq *io_cq)
415 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
417 /* Use the basic completion descriptor for Rx */
418 io_cq->cdesc_entry_size_in_bytes =
419 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
420 sizeof(struct ena_eth_io_tx_cdesc) :
421 sizeof(struct ena_eth_io_rx_cdesc_base);
423 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr =
428 dma_alloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr =
433 dma_alloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr,
438 if (!io_cq->cdesc_addr.virt_addr) {
439 netdev_err(ena_dev->net_device, "Memory allocation failed\n");
449 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
450 struct ena_admin_acq_entry *cqe)
452 struct ena_comp_ctx *comp_ctx;
455 cmd_id = cqe->acq_common_descriptor.command &
456 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
458 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
459 if (unlikely(!comp_ctx)) {
460 netdev_err(admin_queue->ena_dev->net_device,
461 "comp_ctx is NULL. Changing the admin queue running state\n");
462 admin_queue->running_state = false;
466 comp_ctx->status = ENA_CMD_COMPLETED;
467 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
469 if (comp_ctx->user_cqe)
470 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
472 if (!admin_queue->polling)
473 complete(&comp_ctx->wait_event);
476 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
478 struct ena_admin_acq_entry *cqe = NULL;
483 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
484 phase = admin_queue->cq.phase;
486 cqe = &admin_queue->cq.entries[head_masked];
488 /* Go over all the completions */
489 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
490 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry until the
 * phase bit has been validated
495 ena_com_handle_single_admin_completion(admin_queue, cqe);
499 if (unlikely(head_masked == admin_queue->q_depth)) {
504 cqe = &admin_queue->cq.entries[head_masked];
507 admin_queue->cq.head += comp_num;
508 admin_queue->cq.phase = phase;
509 admin_queue->sq.head += comp_num;
510 admin_queue->stats.completed_cmd += comp_num;
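/* Translate a completion status reported by the device into a negative errno
 * value (0 on ENA_ADMIN_SUCCESS).
 */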
513 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
516 if (unlikely(comp_status != 0))
517 netdev_err(admin_queue->ena_dev->net_device,
518 "Admin command failed[%u]\n", comp_status);
520 switch (comp_status) {
521 case ENA_ADMIN_SUCCESS:
523 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
525 case ENA_ADMIN_UNSUPPORTED_OPCODE:
527 case ENA_ADMIN_BAD_OPCODE:
528 case ENA_ADMIN_MALFORMED_REQUEST:
529 case ENA_ADMIN_ILLEGAL_PARAMETER:
530 case ENA_ADMIN_UNKNOWN_ERROR:
532 case ENA_ADMIN_RESOURCE_BUSY:
539 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
541 exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
542 delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
543 delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
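/* Example: with delay_us = 100 and exp = 3 this sleeps for 800-1600 us;
 * the delay is capped at ENA_MAX_ADMIN_POLL_US (5000 us).
 */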
544 usleep_range(delay_us, 2 * delay_us);
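/* Poll the admin CQ (under the queue lock) until the command leaves the
 * SUBMITTED state or the completion timeout expires, backing off
 * exponentially between polls.
 */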
547 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
548 struct ena_com_admin_queue *admin_queue)
550 unsigned long flags = 0;
551 unsigned long timeout;
555 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
558 spin_lock_irqsave(&admin_queue->q_lock, flags);
559 ena_com_handle_admin_completion(admin_queue);
560 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
562 if (comp_ctx->status != ENA_CMD_SUBMITTED)
565 if (time_is_before_jiffies(timeout)) {
566 netdev_err(admin_queue->ena_dev->net_device,
567 "Wait for completion (polling) timeout\n");
568 /* ENA didn't have any completion */
569 spin_lock_irqsave(&admin_queue->q_lock, flags);
570 admin_queue->stats.no_completion++;
571 admin_queue->running_state = false;
572 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
578 ena_delay_exponential_backoff_us(exp++,
579 admin_queue->ena_dev->ena_min_poll_delay_us);
582 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
583 netdev_err(admin_queue->ena_dev->net_device,
584 "Command was aborted\n");
585 spin_lock_irqsave(&admin_queue->q_lock, flags);
586 admin_queue->stats.aborted_cmd++;
587 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
592 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
595 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
597 comp_ctxt_release(admin_queue, comp_ctx);
* Set the LLQ configuration in the device firmware.
*
* The driver provides only the enabled feature values to the device,
* which in turn checks whether they are supported.
607 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
609 struct ena_com_admin_queue *admin_queue;
610 struct ena_admin_set_feat_cmd cmd;
611 struct ena_admin_set_feat_resp resp;
612 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
615 memset(&cmd, 0x0, sizeof(cmd));
616 admin_queue = &ena_dev->admin_queue;
618 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
619 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
621 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
622 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
623 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
624 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
626 cmd.u.llq.accel_mode.u.set.enabled_flags =
627 BIT(ENA_ADMIN_DISABLE_META_CACHING) |
628 BIT(ENA_ADMIN_LIMIT_TX_BURST);
630 ret = ena_com_execute_admin_command(admin_queue,
631 (struct ena_admin_aq_entry *)&cmd,
633 (struct ena_admin_acq_entry *)&resp,
637 netdev_err(ena_dev->net_device,
638 "Failed to set LLQ configurations: %d\n", ret);
643 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
644 struct ena_admin_feature_llq_desc *llq_features,
645 struct ena_llq_configurations *llq_default_cfg)
647 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
648 struct ena_admin_accel_mode_get llq_accel_mode_get;
652 memset(llq_info, 0, sizeof(*llq_info));
654 supported_feat = llq_features->header_location_ctrl_supported;
656 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
657 llq_info->header_location_ctrl =
658 llq_default_cfg->llq_header_location;
660 netdev_err(ena_dev->net_device,
661 "Invalid header location control, supported: 0x%x\n",
666 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
667 supported_feat = llq_features->descriptors_stride_ctrl_supported;
668 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
669 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
671 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
672 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
673 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
674 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
676 netdev_err(ena_dev->net_device,
677 "Invalid desc_stride_ctrl, supported: 0x%x\n",
682 netdev_err(ena_dev->net_device,
683 "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
684 llq_default_cfg->llq_stride_ctrl,
685 supported_feat, llq_info->desc_stride_ctrl);
688 llq_info->desc_stride_ctrl = 0;
691 supported_feat = llq_features->entry_size_ctrl_supported;
692 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
693 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
694 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
696 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
697 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
698 llq_info->desc_list_entry_size = 128;
699 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
700 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
701 llq_info->desc_list_entry_size = 192;
702 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
703 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
704 llq_info->desc_list_entry_size = 256;
706 netdev_err(ena_dev->net_device,
707 "Invalid entry_size_ctrl, supported: 0x%x\n",
712 netdev_err(ena_dev->net_device,
713 "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
714 llq_default_cfg->llq_ring_entry_size, supported_feat,
715 llq_info->desc_list_entry_size);
717 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
/* The desc list entry size should be a whole multiple of 8.
 * This requirement comes from __iowrite64_copy()
721 netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
722 llq_info->desc_list_entry_size);
726 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
727 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
728 sizeof(struct ena_eth_io_tx_desc);
730 llq_info->descs_per_entry = 1;
732 supported_feat = llq_features->desc_num_before_header_supported;
733 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
734 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
736 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
737 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
738 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
739 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
740 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
741 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
742 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
743 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
745 netdev_err(ena_dev->net_device,
746 "Invalid descs_num_before_header, supported: 0x%x\n",
751 netdev_err(ena_dev->net_device,
752 "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
753 llq_default_cfg->llq_num_decs_before_header,
754 supported_feat, llq_info->descs_num_before_header);
/* Check whether the accelerated queue mode is supported */
757 llq_accel_mode_get = llq_features->accel_mode.u.get;
759 llq_info->disable_meta_caching =
760 !!(llq_accel_mode_get.supported_flags &
761 BIT(ENA_ADMIN_DISABLE_META_CACHING));
763 if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
764 llq_info->max_entries_in_tx_burst =
765 llq_accel_mode_get.max_tx_burst_size /
766 llq_default_cfg->llq_ring_entry_size_value;
768 rc = ena_com_set_llq(ena_dev);
770 netdev_err(ena_dev->net_device,
771 "Cannot set LLQ configuration: %d\n", rc);
776 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
777 struct ena_com_admin_queue *admin_queue)
779 unsigned long flags = 0;
782 wait_for_completion_timeout(&comp_ctx->wait_event,
784 admin_queue->completion_timeout));
/* In case the command wasn't completed, find out the root cause.
 * There might be 2 kinds of errors:
 * 1) No completion (timeout reached)
 * 2) There is a completion but the driver didn't receive a MSI-X interrupt.
791 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
792 spin_lock_irqsave(&admin_queue->q_lock, flags);
793 ena_com_handle_admin_completion(admin_queue);
794 admin_queue->stats.no_completion++;
795 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
797 if (comp_ctx->status == ENA_CMD_COMPLETED) {
798 netdev_err(admin_queue->ena_dev->net_device,
799 "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
800 comp_ctx->cmd_opcode,
801 admin_queue->auto_polling ? "ON" : "OFF");
802 /* Check if fallback to polling is enabled */
803 if (admin_queue->auto_polling)
804 admin_queue->polling = true;
806 netdev_err(admin_queue->ena_dev->net_device,
807 "The ena device didn't send a completion for the admin cmd %d status %d\n",
808 comp_ctx->cmd_opcode, comp_ctx->status);
/* Check if the driver shifted to polling mode.
 * This happens when there is a completion without an interrupt
 * and autopolling mode is enabled. In that case, continue normal execution.
814 if (!admin_queue->polling) {
815 admin_queue->running_state = false;
821 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
823 comp_ctxt_release(admin_queue, comp_ctx);
/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
831 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
833 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
834 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
835 mmio_read->read_resp;
836 u32 mmio_read_reg, ret, i;
837 unsigned long flags = 0;
838 u32 timeout = mmio_read->reg_read_to;
843 timeout = ENA_REG_READ_TIMEOUT;
845 /* If readless is disabled, perform regular read */
846 if (!mmio_read->readless_supported)
847 return readl(ena_dev->reg_bar + offset);
849 spin_lock_irqsave(&mmio_read->lock, flags);
850 mmio_read->seq_num++;
852 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
853 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
854 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
855 mmio_read_reg |= mmio_read->seq_num &
856 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
858 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
860 for (i = 0; i < timeout; i++) {
861 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
867 if (unlikely(i == timeout)) {
868 netdev_err(ena_dev->net_device,
869 "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
870 mmio_read->seq_num, offset, read_resp->req_id,
872 ret = ENA_MMIO_READ_TIMEOUT;
876 if (read_resp->reg_off != offset) {
877 netdev_err(ena_dev->net_device,
878 "Read failure: wrong offset provided\n");
879 ret = ENA_MMIO_READ_TIMEOUT;
881 ret = read_resp->reg_val;
884 spin_unlock_irqrestore(&mmio_read->lock, flags);
/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
896 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
897 struct ena_com_admin_queue *admin_queue)
899 if (admin_queue->polling)
900 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
903 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
907 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
908 struct ena_com_io_sq *io_sq)
910 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
911 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
912 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
916 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
918 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
919 direction = ENA_ADMIN_SQ_DIRECTION_TX;
921 direction = ENA_ADMIN_SQ_DIRECTION_RX;
923 destroy_cmd.sq.sq_identity |= (direction <<
924 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
925 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
927 destroy_cmd.sq.sq_idx = io_sq->idx;
928 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
930 ret = ena_com_execute_admin_command(admin_queue,
931 (struct ena_admin_aq_entry *)&destroy_cmd,
933 (struct ena_admin_acq_entry *)&destroy_resp,
934 sizeof(destroy_resp));
936 if (unlikely(ret && (ret != -ENODEV)))
937 netdev_err(ena_dev->net_device,
938 "Failed to destroy io sq error: %d\n", ret);
943 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
944 struct ena_com_io_sq *io_sq,
945 struct ena_com_io_cq *io_cq)
949 if (io_cq->cdesc_addr.virt_addr) {
950 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
952 dma_free_coherent(ena_dev->dmadev, size,
953 io_cq->cdesc_addr.virt_addr,
954 io_cq->cdesc_addr.phys_addr);
956 io_cq->cdesc_addr.virt_addr = NULL;
959 if (io_sq->desc_addr.virt_addr) {
960 size = io_sq->desc_entry_size * io_sq->q_depth;
962 dma_free_coherent(ena_dev->dmadev, size,
963 io_sq->desc_addr.virt_addr,
964 io_sq->desc_addr.phys_addr);
966 io_sq->desc_addr.virt_addr = NULL;
969 if (io_sq->bounce_buf_ctrl.base_buffer) {
970 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
971 io_sq->bounce_buf_ctrl.base_buffer = NULL;
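/* Poll ENA_REGS_DEV_STS_OFF until the reset-in-progress bit reaches the
 * expected state or the timeout (given in units of 100 ms) expires.
 */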
975 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
979 unsigned long timeout_stamp;
/* The timeout is given in units of 100 ms; convert it to microseconds for usecs_to_jiffies(). */
982 timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
985 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
987 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
988 netdev_err(ena_dev->net_device,
989 "Reg read timeout occurred\n");
993 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
997 if (time_is_before_jiffies(timeout_stamp))
1000 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1004 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1005 enum ena_admin_aq_feature_id feature_id)
1007 u32 feature_mask = 1 << feature_id;
/* Device attributes are always supported */
1010 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1011 !(ena_dev->supported_features & feature_mask))
1017 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1018 struct ena_admin_get_feat_resp *get_resp,
1019 enum ena_admin_aq_feature_id feature_id,
1020 dma_addr_t control_buf_dma_addr,
1021 u32 control_buff_size,
1024 struct ena_com_admin_queue *admin_queue;
1025 struct ena_admin_get_feat_cmd get_cmd;
1028 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1029 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
1034 memset(&get_cmd, 0x0, sizeof(get_cmd));
1035 admin_queue = &ena_dev->admin_queue;
1037 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1039 if (control_buff_size)
1040 get_cmd.aq_common_descriptor.flags =
1041 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1043 get_cmd.aq_common_descriptor.flags = 0;
1045 ret = ena_com_mem_addr_set(ena_dev,
1046 &get_cmd.control_buffer.address,
1047 control_buf_dma_addr);
1048 if (unlikely(ret)) {
1049 netdev_err(ena_dev->net_device, "Memory address set failed\n");
1053 get_cmd.control_buffer.length = control_buff_size;
1054 get_cmd.feat_common.feature_version = feature_ver;
1055 get_cmd.feat_common.feature_id = feature_id;
1057 ret = ena_com_execute_admin_command(admin_queue,
1058 (struct ena_admin_aq_entry *)
1061 (struct ena_admin_acq_entry *)
1066 netdev_err(ena_dev->net_device,
1067 "Failed to submit get_feature command %d error: %d\n",
1073 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1074 struct ena_admin_get_feat_resp *get_resp,
1075 enum ena_admin_aq_feature_id feature_id,
1078 return ena_com_get_feature_ex(ena_dev,
1086 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1088 return ena_dev->rss.hash_func;
1091 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1093 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1094 (ena_dev->rss).hash_key;
1096 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
/* The key buffer is stored in the device in an array of
 * u32 elements.
 */
1100 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1103 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1105 struct ena_rss *rss = &ena_dev->rss;
1107 if (!ena_com_check_supported_feature_id(ena_dev,
1108 ENA_ADMIN_RSS_HASH_FUNCTION))
1112 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1113 &rss->hash_key_dma_addr, GFP_KERNEL);
1115 if (unlikely(!rss->hash_key))
1121 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1123 struct ena_rss *rss = &ena_dev->rss;
1126 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1127 rss->hash_key, rss->hash_key_dma_addr);
1128 rss->hash_key = NULL;
1131 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1133 struct ena_rss *rss = &ena_dev->rss;
1136 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1137 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1139 if (unlikely(!rss->hash_ctrl))
1145 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1147 struct ena_rss *rss = &ena_dev->rss;
1150 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1151 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1152 rss->hash_ctrl = NULL;
1155 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1158 struct ena_rss *rss = &ena_dev->rss;
1159 struct ena_admin_get_feat_resp get_resp;
1163 ret = ena_com_get_feature(ena_dev, &get_resp,
1164 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1168 if ((get_resp.u.ind_table.min_size > log_size) ||
1169 (get_resp.u.ind_table.max_size < log_size)) {
1170 netdev_err(ena_dev->net_device,
1171 "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1172 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1173 1 << get_resp.u.ind_table.max_size);
1177 tbl_size = (1ULL << log_size) *
1178 sizeof(struct ena_admin_rss_ind_table_entry);
1181 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1182 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1183 if (unlikely(!rss->rss_ind_tbl))
1186 tbl_size = (1ULL << log_size) * sizeof(u16);
1187 rss->host_rss_ind_tbl =
1188 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1189 if (unlikely(!rss->host_rss_ind_tbl))
1192 rss->tbl_log_size = log_size;
1197 tbl_size = (1ULL << log_size) *
1198 sizeof(struct ena_admin_rss_ind_table_entry);
1200 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1201 rss->rss_ind_tbl_dma_addr);
1202 rss->rss_ind_tbl = NULL;
1204 rss->tbl_log_size = 0;
1208 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1210 struct ena_rss *rss = &ena_dev->rss;
1211 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1212 sizeof(struct ena_admin_rss_ind_table_entry);
1214 if (rss->rss_ind_tbl)
1215 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1216 rss->rss_ind_tbl_dma_addr);
1217 rss->rss_ind_tbl = NULL;
1219 if (rss->host_rss_ind_tbl)
1220 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1221 rss->host_rss_ind_tbl = NULL;
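/* Issue the CREATE_SQ admin command for the given IO SQ. For host-memory
 * queues the descriptor ring address is passed to the device; on success the
 * returned SQ index, doorbell offset and (for LLQ) device-memory offsets are
 * recorded in the io_sq.
 */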
1224 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1225 struct ena_com_io_sq *io_sq, u16 cq_idx)
1227 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1228 struct ena_admin_aq_create_sq_cmd create_cmd;
1229 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1233 memset(&create_cmd, 0x0, sizeof(create_cmd));
1235 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1237 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1238 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1240 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1242 create_cmd.sq_identity |= (direction <<
1243 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1244 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1246 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1247 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1249 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1250 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1251 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1253 create_cmd.sq_caps_3 |=
1254 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1256 create_cmd.cq_idx = cq_idx;
1257 create_cmd.sq_depth = io_sq->q_depth;
1259 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1260 ret = ena_com_mem_addr_set(ena_dev,
1262 io_sq->desc_addr.phys_addr);
1263 if (unlikely(ret)) {
1264 netdev_err(ena_dev->net_device,
1265 "Memory address set failed\n");
1270 ret = ena_com_execute_admin_command(admin_queue,
1271 (struct ena_admin_aq_entry *)&create_cmd,
1273 (struct ena_admin_acq_entry *)&cmd_completion,
1274 sizeof(cmd_completion));
1275 if (unlikely(ret)) {
1276 netdev_err(ena_dev->net_device,
1277 "Failed to create IO SQ. error: %d\n", ret);
1281 io_sq->idx = cmd_completion.sq_idx;
1283 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1284 (uintptr_t)cmd_completion.sq_doorbell_offset);
1286 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1287 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1288 + cmd_completion.llq_headers_offset);
1290 io_sq->desc_addr.pbuf_dev_addr =
1291 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1292 cmd_completion.llq_descriptors_offset);
1295 netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
1296 io_sq->idx, io_sq->q_depth);
1301 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1303 struct ena_rss *rss = &ena_dev->rss;
1304 struct ena_com_io_sq *io_sq;
1308 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1309 qid = rss->host_rss_ind_tbl[i];
1310 if (qid >= ENA_TOTAL_NUM_QUEUES)
1313 io_sq = &ena_dev->io_sq_queues[qid];
1315 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1318 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
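/* Interrupt moderation intervals are stored in units of the device's delay
 * resolution. When the resolution changes, rescale the stored TX/RX values:
 * new_interval = old_interval * prev_resolution / new_resolution.
 */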
1324 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1325 u16 intr_delay_resolution)
1327 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1329 if (unlikely(!intr_delay_resolution)) {
1330 netdev_err(ena_dev->net_device,
1331 "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1332 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1336 ena_dev->intr_moder_rx_interval =
1337 ena_dev->intr_moder_rx_interval *
1338 prev_intr_delay_resolution /
1339 intr_delay_resolution;
1342 ena_dev->intr_moder_tx_interval =
1343 ena_dev->intr_moder_tx_interval *
1344 prev_intr_delay_resolution /
1345 intr_delay_resolution;
1347 ena_dev->intr_delay_resolution = intr_delay_resolution;
1350 /*****************************************************************************/
1351 /******************************* API ******************************/
1352 /*****************************************************************************/
1354 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1355 struct ena_admin_aq_entry *cmd,
1357 struct ena_admin_acq_entry *comp,
1360 struct ena_comp_ctx *comp_ctx;
1363 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1365 if (IS_ERR(comp_ctx)) {
1366 ret = PTR_ERR(comp_ctx);
1368 netdev_dbg(admin_queue->ena_dev->net_device,
1369 "Failed to submit command [%d]\n", ret);
1371 netdev_err(admin_queue->ena_dev->net_device,
1372 "Failed to submit command [%d]\n", ret);
1377 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1378 if (unlikely(ret)) {
1379 if (admin_queue->running_state)
1380 netdev_err(admin_queue->ena_dev->net_device,
1381 "Failed to process command. ret = %d\n", ret);
1383 netdev_dbg(admin_queue->ena_dev->net_device,
1384 "Failed to process command. ret = %d\n", ret);
1389 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1390 struct ena_com_io_cq *io_cq)
1392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1393 struct ena_admin_aq_create_cq_cmd create_cmd;
1394 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1397 memset(&create_cmd, 0x0, sizeof(create_cmd));
1399 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1401 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1402 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1403 create_cmd.cq_caps_1 |=
1404 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1406 create_cmd.msix_vector = io_cq->msix_vector;
1407 create_cmd.cq_depth = io_cq->q_depth;
1409 ret = ena_com_mem_addr_set(ena_dev,
1411 io_cq->cdesc_addr.phys_addr);
1412 if (unlikely(ret)) {
1413 netdev_err(ena_dev->net_device, "Memory address set failed\n");
1417 ret = ena_com_execute_admin_command(admin_queue,
1418 (struct ena_admin_aq_entry *)&create_cmd,
1420 (struct ena_admin_acq_entry *)&cmd_completion,
1421 sizeof(cmd_completion));
1422 if (unlikely(ret)) {
1423 netdev_err(ena_dev->net_device,
1424 "Failed to create IO CQ. error: %d\n", ret);
1428 io_cq->idx = cmd_completion.cq_idx;
1430 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1431 cmd_completion.cq_interrupt_unmask_register_offset);
1433 if (cmd_completion.cq_head_db_register_offset)
1434 io_cq->cq_head_db_reg =
1435 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1436 cmd_completion.cq_head_db_register_offset);
1438 if (cmd_completion.numa_node_register_offset)
1439 io_cq->numa_node_cfg_reg =
1440 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1441 cmd_completion.numa_node_register_offset);
1443 netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
1444 io_cq->idx, io_cq->q_depth);
1449 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1450 struct ena_com_io_sq **io_sq,
1451 struct ena_com_io_cq **io_cq)
1453 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1454 netdev_err(ena_dev->net_device,
1455 "Invalid queue number %d but the max is %d\n", qid,
1456 ENA_TOTAL_NUM_QUEUES);
1460 *io_sq = &ena_dev->io_sq_queues[qid];
1461 *io_cq = &ena_dev->io_cq_queues[qid];
1466 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1468 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1469 struct ena_comp_ctx *comp_ctx;
1472 if (!admin_queue->comp_ctx)
1475 for (i = 0; i < admin_queue->q_depth; i++) {
1476 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1477 if (unlikely(!comp_ctx))
1480 comp_ctx->status = ENA_CMD_ABORTED;
1482 complete(&comp_ctx->wait_event);
1486 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1488 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1489 unsigned long flags = 0;
1492 spin_lock_irqsave(&admin_queue->q_lock, flags);
1493 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1494 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1495 ena_delay_exponential_backoff_us(exp++,
1496 ena_dev->ena_min_poll_delay_us);
1497 spin_lock_irqsave(&admin_queue->q_lock, flags);
1499 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1502 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1503 struct ena_com_io_cq *io_cq)
1505 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1506 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1507 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1510 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1512 destroy_cmd.cq_idx = io_cq->idx;
1513 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1515 ret = ena_com_execute_admin_command(admin_queue,
1516 (struct ena_admin_aq_entry *)&destroy_cmd,
1517 sizeof(destroy_cmd),
1518 (struct ena_admin_acq_entry *)&destroy_resp,
1519 sizeof(destroy_resp));
1521 if (unlikely(ret && (ret != -ENODEV)))
1522 netdev_err(ena_dev->net_device,
1523 "Failed to destroy IO CQ. error: %d\n", ret);
1528 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1530 return ena_dev->admin_queue.running_state;
1533 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1535 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1536 unsigned long flags = 0;
1538 spin_lock_irqsave(&admin_queue->q_lock, flags);
1539 ena_dev->admin_queue.running_state = state;
1540 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1543 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1545 u16 depth = ena_dev->aenq.q_depth;
1547 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1549 /* Init head_db to mark that all entries in the queue
1550 * are initially available
1552 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1555 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1557 struct ena_com_admin_queue *admin_queue;
1558 struct ena_admin_set_feat_cmd cmd;
1559 struct ena_admin_set_feat_resp resp;
1560 struct ena_admin_get_feat_resp get_resp;
1563 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1565 dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1569 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1570 netdev_warn(ena_dev->net_device,
1571 "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1572 get_resp.u.aenq.supported_groups, groups_flag);
1576 memset(&cmd, 0x0, sizeof(cmd));
1577 admin_queue = &ena_dev->admin_queue;
1579 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1580 cmd.aq_common_descriptor.flags = 0;
1581 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1582 cmd.u.aenq.enabled_groups = groups_flag;
1584 ret = ena_com_execute_admin_command(admin_queue,
1585 (struct ena_admin_aq_entry *)&cmd,
1587 (struct ena_admin_acq_entry *)&resp,
1591 netdev_err(ena_dev->net_device,
1592 "Failed to config AENQ ret: %d\n", ret);
1597 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1599 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1602 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1603 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1607 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1608 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1610 netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1612 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1613 netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
1618 ena_dev->dma_addr_bits = width;
1623 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1627 u32 ctrl_ver_masked;
/* Make sure the ENA version and the controller version are at least
 * as high as the driver expects
1632 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1633 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1634 ENA_REGS_CONTROLLER_VERSION_OFF);
1636 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1637 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1638 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1642 dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1643 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1644 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1645 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1647 dev_info(ena_dev->dmadev,
1648 "ENA controller version: %d.%d.%d implementation version %d\n",
1649 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1650 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1651 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1652 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1653 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1654 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1655 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1658 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1659 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1660 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1662 /* Validate the ctrl version without the implementation ID */
1663 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1664 netdev_err(ena_dev->net_device,
1665 "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1673 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1674 struct ena_com_admin_queue *admin_queue)
1677 if (!admin_queue->comp_ctx)
1680 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1682 admin_queue->comp_ctx = NULL;
1685 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1687 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1688 struct ena_com_admin_cq *cq = &admin_queue->cq;
1689 struct ena_com_admin_sq *sq = &admin_queue->sq;
1690 struct ena_com_aenq *aenq = &ena_dev->aenq;
1693 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1695 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1697 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1701 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1703 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1707 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1708 if (ena_dev->aenq.entries)
1709 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1711 aenq->entries = NULL;
1714 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1719 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1721 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1722 ena_dev->admin_queue.polling = polling;
1725 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1728 ena_dev->admin_queue.auto_polling = polling;
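/* Allocate the DMA-coherent response buffer used for readless register reads
 * and program its address into the device's MMIO response registers.
 */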
1731 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1733 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1735 spin_lock_init(&mmio_read->lock);
1736 mmio_read->read_resp =
1737 dma_alloc_coherent(ena_dev->dmadev,
1738 sizeof(*mmio_read->read_resp),
1739 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1740 if (unlikely(!mmio_read->read_resp))
1743 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1745 mmio_read->read_resp->req_id = 0x0;
1746 mmio_read->seq_num = 0x0;
1747 mmio_read->readless_supported = true;
1756 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1758 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1760 mmio_read->readless_supported = readless_supported;
1763 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1765 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1767 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1768 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1770 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1771 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1773 mmio_read->read_resp = NULL;
1776 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1778 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1779 u32 addr_low, addr_high;
1781 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1782 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1784 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1785 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
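/* Bring up the admin queue: verify the device is ready, allocate the
 * completion contexts and the admin SQ/CQ rings, program their base addresses
 * and capability registers, and initialize the AENQ with the given handlers.
 */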
1788 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1789 struct ena_aenq_handlers *aenq_handlers)
1791 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1792 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1795 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1797 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1798 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1802 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1803 netdev_err(ena_dev->net_device,
1804 "Device isn't ready, abort com init\n");
1808 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1810 admin_queue->q_dmadev = ena_dev->dmadev;
1811 admin_queue->polling = false;
1812 admin_queue->curr_cmd_id = 0;
1814 atomic_set(&admin_queue->outstanding_cmds, 0);
1816 spin_lock_init(&admin_queue->q_lock);
1818 ret = ena_com_init_comp_ctxt(admin_queue);
1822 ret = ena_com_admin_init_sq(admin_queue);
1826 ret = ena_com_admin_init_cq(admin_queue);
1830 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1831 ENA_REGS_AQ_DB_OFF);
1833 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1834 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1836 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1837 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1839 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1840 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1842 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1843 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1846 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1847 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1848 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1849 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1852 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1853 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1854 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1855 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1857 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1858 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1859 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1863 admin_queue->ena_dev = ena_dev;
1864 admin_queue->running_state = true;
1868 ena_com_admin_destroy(ena_dev);
1873 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1874 struct ena_com_create_io_ctx *ctx)
1876 struct ena_com_io_sq *io_sq;
1877 struct ena_com_io_cq *io_cq;
1880 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1881 netdev_err(ena_dev->net_device,
1882 "Qid (%d) is bigger than max num of queues (%d)\n",
1883 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1887 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1888 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1890 memset(io_sq, 0x0, sizeof(*io_sq));
1891 memset(io_cq, 0x0, sizeof(*io_cq));
1894 io_cq->q_depth = ctx->queue_size;
1895 io_cq->direction = ctx->direction;
1896 io_cq->qid = ctx->qid;
1898 io_cq->msix_vector = ctx->msix_vector;
1900 io_sq->q_depth = ctx->queue_size;
1901 io_sq->direction = ctx->direction;
1902 io_sq->qid = ctx->qid;
1904 io_sq->mem_queue_type = ctx->mem_queue_type;
1906 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1907 /* header length is limited to 8 bits */
1908 io_sq->tx_max_header_size =
1909 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1911 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1914 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1918 ret = ena_com_create_io_cq(ena_dev, io_cq);
1922 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1929 ena_com_destroy_io_cq(ena_dev, io_cq);
1931 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1935 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1937 struct ena_com_io_sq *io_sq;
1938 struct ena_com_io_cq *io_cq;
1940 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1941 netdev_err(ena_dev->net_device,
1942 "Qid (%d) is bigger than max num of queues (%d)\n",
1943 qid, ENA_TOTAL_NUM_QUEUES);
1947 io_sq = &ena_dev->io_sq_queues[qid];
1948 io_cq = &ena_dev->io_cq_queues[qid];
1950 ena_com_destroy_io_sq(ena_dev, io_sq);
1951 ena_com_destroy_io_cq(ena_dev, io_cq);
1953 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1956 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1957 struct ena_admin_get_feat_resp *resp)
1959 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1962 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1963 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1965 struct ena_admin_get_feat_resp get_resp;
1968 rc = ena_com_get_feature(ena_dev, &get_resp,
1969 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1973 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1974 sizeof(get_resp.u.dev_attr));
1976 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1977 ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
1979 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1980 rc = ena_com_get_feature(ena_dev, &get_resp,
1981 ENA_ADMIN_MAX_QUEUES_EXT,
1982 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1986 if (get_resp.u.max_queue_ext.version !=
1987 ENA_FEATURE_MAX_QUEUE_EXT_VER)
1990 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1991 sizeof(get_resp.u.max_queue_ext));
1992 ena_dev->tx_max_header_size =
1993 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1995 rc = ena_com_get_feature(ena_dev, &get_resp,
1996 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1997 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1998 sizeof(get_resp.u.max_queue));
1999 ena_dev->tx_max_header_size =
2000 get_resp.u.max_queue.max_header_size;
2006 rc = ena_com_get_feature(ena_dev, &get_resp,
2007 ENA_ADMIN_AENQ_CONFIG, 0);
2011 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2012 sizeof(get_resp.u.aenq));
2014 rc = ena_com_get_feature(ena_dev, &get_resp,
2015 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2019 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2020 sizeof(get_resp.u.offload));
/* Driver hints isn't a mandatory admin command, so in case the
 * command isn't supported, set driver hints to 0
2025 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2028 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2029 sizeof(get_resp.u.hw_hints));
2030 else if (rc == -EOPNOTSUPP)
2031 memset(&get_feat_ctx->hw_hints, 0x0,
2032 sizeof(get_feat_ctx->hw_hints));
2036 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2038 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2039 sizeof(get_resp.u.llq));
2040 else if (rc == -EOPNOTSUPP)
2041 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2048 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2050 ena_com_handle_admin_completion(&ena_dev->admin_queue);
/* ena_com_get_specific_aenq_cb:
 * Return the handler that is relevant to the specific event group
2056 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2059 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2061 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2062 return aenq_handlers->handlers[group];
2064 return aenq_handlers->unimplemented_handler;
/* ena_com_aenq_intr_handler:
 * Handles incoming AENQ events.
 * Pop events from the queue and apply the specific handler to each.
2071 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2073 struct ena_admin_aenq_entry *aenq_e;
2074 struct ena_admin_aenq_common_desc *aenq_common;
2075 struct ena_com_aenq *aenq = &ena_dev->aenq;
2077 ena_aenq_handler handler_cb;
2078 u16 masked_head, processed = 0;
2081 masked_head = aenq->head & (aenq->q_depth - 1);
2082 phase = aenq->phase;
2083 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2084 aenq_common = &aenq_e->aenq_common_desc;
2086 /* Go over all the events */
2087 while ((READ_ONCE(aenq_common->flags) &
2088 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2089 /* Make sure the phase bit (ownership) is as expected before
2090 * reading the rest of the descriptor.
2094 timestamp = (u64)aenq_common->timestamp_low |
2095 ((u64)aenq_common->timestamp_high << 32);
2097 netdev_dbg(ena_dev->net_device,
2098 "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2099 aenq_common->group, aenq_common->syndrome, timestamp);
2101 /* Handle specific event */
2102 handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2103 aenq_common->group);
2104 handler_cb(data, aenq_e); /* call the actual event handler */
2106 /* Get next event entry */
2110 if (unlikely(masked_head == aenq->q_depth)) {
2114 aenq_e = &aenq->entries[masked_head];
2115 aenq_common = &aenq_e->aenq_common_desc;
2118 aenq->head += processed;
2119 aenq->phase = phase;
2121 /* Don't update the AENQ doorbell if no events were processed */
2125 /* Write the AENQ doorbell after all AENQ descriptors have been read */
2127 writel_relaxed((u32)aenq->head,
2128 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
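/*
 * Illustrative sketch (not part of the driver flow): the dispatch above relies
 * on a caller-provided ena_aenq_handlers table with one handler per event
 * group plus a fallback. ena_example_on_link_change() and
 * ena_example_unimplemented() are hypothetical names used only for
 * illustration; the table would be passed to the AENQ init path.
 */
static void __maybe_unused ena_example_on_link_change(void *data,
						      struct ena_admin_aenq_entry *aenq_e)
{
	/* 'data' is the cookie passed to ena_com_aenq_intr_handler() */
	pr_debug("ENA link change event, syndrome 0x%x\n",
		 aenq_e->aenq_common_desc.syndrome);
}

static void __maybe_unused ena_example_unimplemented(void *data,
						     struct ena_admin_aenq_entry *aenq_e)
{
	pr_debug("Unhandled ENA AENQ group 0x%x\n",
		 aenq_e->aenq_common_desc.group);
}

static struct ena_aenq_handlers ena_example_aenq_handlers __maybe_unused = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_example_on_link_change,
	},
	.unimplemented_handler = ena_example_unimplemented,
};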
2131 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2132 enum ena_regs_reset_reason_types reset_reason)
2134 u32 stat, timeout, cap, reset_val;
2137 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2138 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2140 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2141 (cap == ENA_MMIO_READ_TIMEOUT))) {
2142 netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2146 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2147 netdev_err(ena_dev->net_device,
2148 "Device isn't ready, can't reset device\n");
2152 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2153 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2155 netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2160 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2161 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2162 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2163 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2165 /* Write the MMIO read request address again */
2166 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2168 rc = wait_for_reset_state(ena_dev, timeout,
2169 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2171 netdev_err(ena_dev->net_device,
2172 "Reset indication didn't turn on\n");
2177 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2178 rc = wait_for_reset_state(ena_dev, timeout, 0);
2180 netdev_err(ena_dev->net_device,
2181 "Reset indication didn't turn off\n");
2185 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2186 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2188 /* The resolution of the timeout register is 100 ms */
2189 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2191 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
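/*
 * Illustrative sketch (not part of the driver flow): a recovery path would
 * typically issue a normal reset and then re-run the admin-queue/device init
 * sequence. Note that the admin completion timeout derived above is the CAPS
 * field (100 ms units) converted to microseconds.
 */
static int __maybe_unused ena_example_recover(struct ena_com_dev *ena_dev)
{
	int rc;

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (unlikely(rc))
		netdev_err(ena_dev->net_device,
			   "Device reset failed. error: %d\n", rc);

	return rc;
}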
2196 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2197 struct ena_com_stats_ctx *ctx,
2198 enum ena_admin_get_stats_type type)
2200 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2201 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2202 struct ena_com_admin_queue *admin_queue;
2205 admin_queue = &ena_dev->admin_queue;
2207 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2208 get_cmd->aq_common_descriptor.flags = 0;
2209 get_cmd->type = type;
2211 ret = ena_com_execute_admin_command(admin_queue,
2212 (struct ena_admin_aq_entry *)get_cmd,
2214 (struct ena_admin_acq_entry *)get_resp,
2218 netdev_err(ena_dev->net_device,
2219 "Failed to get stats. error: %d\n", ret);
2224 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2225 struct ena_admin_eni_stats *stats)
2227 struct ena_com_stats_ctx ctx;
2230 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2231 netdev_err(ena_dev->net_device,
2232 "Capability %d isn't supported\n",
2233 ENA_ADMIN_ENI_STATS);
2237 memset(&ctx, 0x0, sizeof(ctx));
2238 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2239 if (likely(ret == 0))
2240 memcpy(stats, &ctx.get_resp.u.eni_stats,
2241 sizeof(ctx.get_resp.u.eni_stats));
2246 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2247 struct ena_admin_basic_stats *stats)
2249 struct ena_com_stats_ctx ctx;
2252 memset(&ctx, 0x0, sizeof(ctx));
2253 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2254 if (likely(ret == 0))
2255 memcpy(stats, &ctx.get_resp.u.basic_stats,
2256 sizeof(ctx.get_resp.u.basic_stats));
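/*
 * Illustrative sketch (not part of the driver flow): the basic statistics are
 * returned as 32-bit low/high halves; a hypothetical caller would combine them
 * into 64-bit counters as shown below.
 */
static void __maybe_unused ena_example_read_basic_stats(struct ena_com_dev *ena_dev)
{
	struct ena_admin_basic_stats stats;
	u64 rx_bytes, tx_bytes;

	if (ena_com_get_dev_basic_stats(ena_dev, &stats))
		return;

	/* Combine the low/high 32-bit halves into full 64-bit counters */
	rx_bytes = ((u64)stats.rx_bytes_high << 32) | stats.rx_bytes_low;
	tx_bytes = ((u64)stats.tx_bytes_high << 32) | stats.tx_bytes_low;

	netdev_dbg(ena_dev->net_device, "rx_bytes: %llu tx_bytes: %llu\n",
		   rx_bytes, tx_bytes);
}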
2261 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2263 struct ena_com_admin_queue *admin_queue;
2264 struct ena_admin_set_feat_cmd cmd;
2265 struct ena_admin_set_feat_resp resp;
2268 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2269 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2274 memset(&cmd, 0x0, sizeof(cmd));
2275 admin_queue = &ena_dev->admin_queue;
2277 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2278 cmd.aq_common_descriptor.flags = 0;
2279 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2280 cmd.u.mtu.mtu = mtu;
2282 ret = ena_com_execute_admin_command(admin_queue,
2283 (struct ena_admin_aq_entry *)&cmd,
2285 (struct ena_admin_acq_entry *)&resp,
2289 netdev_err(ena_dev->net_device,
2290 "Failed to set mtu %d. error: %d\n", mtu, ret);
2295 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2296 struct ena_admin_feature_offload_desc *offload)
2299 struct ena_admin_get_feat_resp resp;
2301 ret = ena_com_get_feature(ena_dev, &resp,
2302 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2303 if (unlikely(ret)) {
2304 netdev_err(ena_dev->net_device,
2305 "Failed to get offload capabilities %d\n", ret);
2309 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2314 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2316 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2317 struct ena_rss *rss = &ena_dev->rss;
2318 struct ena_admin_set_feat_cmd cmd;
2319 struct ena_admin_set_feat_resp resp;
2320 struct ena_admin_get_feat_resp get_resp;
2323 if (!ena_com_check_supported_feature_id(ena_dev,
2324 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2325 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2326 ENA_ADMIN_RSS_HASH_FUNCTION);
2330 /* Validate hash function is supported */
2331 ret = ena_com_get_feature(ena_dev, &get_resp,
2332 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2336 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2337 netdev_err(ena_dev->net_device,
2338 "Func hash %d isn't supported by device, abort\n",
2343 memset(&cmd, 0x0, sizeof(cmd));
2345 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2346 cmd.aq_common_descriptor.flags =
2347 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2348 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2349 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2350 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2352 ret = ena_com_mem_addr_set(ena_dev,
2353 &cmd.control_buffer.address,
2354 rss->hash_key_dma_addr);
2355 if (unlikely(ret)) {
2356 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2360 cmd.control_buffer.length = sizeof(*rss->hash_key);
2362 ret = ena_com_execute_admin_command(admin_queue,
2363 (struct ena_admin_aq_entry *)&cmd,
2365 (struct ena_admin_acq_entry *)&resp,
2367 if (unlikely(ret)) {
2368 netdev_err(ena_dev->net_device,
2369 "Failed to set hash function %d. error: %d\n",
2370 rss->hash_func, ret);
2377 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2378 enum ena_admin_hash_functions func,
2379 const u8 *key, u16 key_len, u32 init_val)
2381 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2382 struct ena_admin_get_feat_resp get_resp;
2383 enum ena_admin_hash_functions old_func;
2384 struct ena_rss *rss = &ena_dev->rss;
2387 hash_key = rss->hash_key;
2389 /* Make sure the key length is a multiple of DWORDs */
2390 if (unlikely(key_len & 0x3))
2393 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2394 ENA_ADMIN_RSS_HASH_FUNCTION,
2395 rss->hash_key_dma_addr,
2396 sizeof(*rss->hash_key), 0);
2400 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2401 netdev_err(ena_dev->net_device,
2402 "Flow hash function %d isn't supported\n", func);
2406 if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2407 if (key_len != sizeof(hash_key->key)) {
2408 netdev_err(ena_dev->net_device,
2409 "key len (%u) doesn't equal the supported size (%zu)\n",
2410 key_len, sizeof(hash_key->key));
2413 memcpy(hash_key->key, key, key_len);
2414 hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2417 rss->hash_init_val = init_val;
2418 old_func = rss->hash_func;
2419 rss->hash_func = func;
2420 rc = ena_com_set_hash_function(ena_dev);
2422 /* Restore the old function */
2424 rss->hash_func = old_func;
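/*
 * Illustrative sketch (not part of the driver flow): programming a Toeplitz
 * key, e.g. from an ethtool-style request. The 0xFFFFFFFF initial value is an
 * assumption for the example; the key length must be a multiple of 4 bytes,
 * as enforced above.
 */
static int __maybe_unused ena_example_set_toeplitz_key(struct ena_com_dev *ena_dev,
							const u8 *key, u16 key_len)
{
	return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					  key, key_len, 0xFFFFFFFF);
}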
2429 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2430 enum ena_admin_hash_functions *func)
2432 struct ena_rss *rss = &ena_dev->rss;
2433 struct ena_admin_get_feat_resp get_resp;
2436 if (unlikely(!func))
2439 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2440 ENA_ADMIN_RSS_HASH_FUNCTION,
2441 rss->hash_key_dma_addr,
2442 sizeof(*rss->hash_key), 0);
2446 /* ffs() returns 1 when the LSB is set */
2447 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2451 *func = rss->hash_func;
2456 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2458 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2459 ena_dev->rss.hash_key;
2462 memcpy(key, hash_key->key,
2463 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2468 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2469 enum ena_admin_flow_hash_proto proto,
2472 struct ena_rss *rss = &ena_dev->rss;
2473 struct ena_admin_get_feat_resp get_resp;
2476 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2477 ENA_ADMIN_RSS_HASH_INPUT,
2478 rss->hash_ctrl_dma_addr,
2479 sizeof(*rss->hash_ctrl), 0);
2484 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2489 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2491 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2492 struct ena_rss *rss = &ena_dev->rss;
2493 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2494 struct ena_admin_set_feat_cmd cmd;
2495 struct ena_admin_set_feat_resp resp;
2498 if (!ena_com_check_supported_feature_id(ena_dev,
2499 ENA_ADMIN_RSS_HASH_INPUT)) {
2500 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2501 ENA_ADMIN_RSS_HASH_INPUT);
2505 memset(&cmd, 0x0, sizeof(cmd));
2507 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2508 cmd.aq_common_descriptor.flags =
2509 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2510 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2511 cmd.u.flow_hash_input.enabled_input_sort =
2512 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2513 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2515 ret = ena_com_mem_addr_set(ena_dev,
2516 &cmd.control_buffer.address,
2517 rss->hash_ctrl_dma_addr);
2518 if (unlikely(ret)) {
2519 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2522 cmd.control_buffer.length = sizeof(*hash_ctrl);
2524 ret = ena_com_execute_admin_command(admin_queue,
2525 (struct ena_admin_aq_entry *)&cmd,
2527 (struct ena_admin_acq_entry *)&resp,
2530 netdev_err(ena_dev->net_device,
2531 "Failed to set hash input. error: %d\n", ret);
2536 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2538 struct ena_rss *rss = &ena_dev->rss;
2539 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2541 u16 available_fields = 0;
2544 /* Get the supported hash input */
2545 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2549 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2550 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2551 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2553 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2554 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2555 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2557 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2558 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2559 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2561 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2562 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2563 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2565 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2566 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2568 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2569 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2571 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2572 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2574 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2575 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2577 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2578 available_fields = hash_ctrl->selected_fields[i].fields &
2579 hash_ctrl->supported_fields[i].fields;
2580 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2581 netdev_err(ena_dev->net_device,
2582 "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2583 i, hash_ctrl->supported_fields[i].fields,
2584 hash_ctrl->selected_fields[i].fields);
2589 rc = ena_com_set_hash_ctrl(ena_dev);
2591 /* In case of failure, restore the old hash ctrl */
2593 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2598 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2599 enum ena_admin_flow_hash_proto proto,
2602 struct ena_rss *rss = &ena_dev->rss;
2603 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2604 u16 supported_fields;
2607 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2608 netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
2613 /* Get the ctrl table */
2614 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2618 /* Make sure all the fields are supported */
2619 supported_fields = hash_ctrl->supported_fields[proto].fields;
2620 if ((hash_fields & supported_fields) != hash_fields) {
2621 netdev_err(ena_dev->net_device,
2622 "Proto %d doesn't support the required fields %x. supports only: %x\n",
2623 proto, hash_fields, supported_fields);
2626 hash_ctrl->selected_fields[proto].fields = hash_fields;
2628 rc = ena_com_set_hash_ctrl(ena_dev);
2630 /* In case of failure, restore the old hash ctrl */
2632 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2637 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2638 u16 entry_idx, u16 entry_value)
2640 struct ena_rss *rss = &ena_dev->rss;
2642 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2645 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2648 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2653 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2655 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2656 struct ena_rss *rss = &ena_dev->rss;
2657 struct ena_admin_set_feat_cmd cmd;
2658 struct ena_admin_set_feat_resp resp;
2661 if (!ena_com_check_supported_feature_id(
2662 ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2663 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2664 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2668 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2670 netdev_err(ena_dev->net_device,
2671 "Failed to convert host indirection table to device table\n");
2675 memset(&cmd, 0x0, sizeof(cmd));
2677 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2678 cmd.aq_common_descriptor.flags =
2679 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2680 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2681 cmd.u.ind_table.size = rss->tbl_log_size;
2682 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2684 ret = ena_com_mem_addr_set(ena_dev,
2685 &cmd.control_buffer.address,
2686 rss->rss_ind_tbl_dma_addr);
2687 if (unlikely(ret)) {
2688 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2692 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2693 sizeof(struct ena_admin_rss_ind_table_entry);
2695 ret = ena_com_execute_admin_command(admin_queue,
2696 (struct ena_admin_aq_entry *)&cmd,
2698 (struct ena_admin_acq_entry *)&resp,
2702 netdev_err(ena_dev->net_device,
2703 "Failed to set indirect table. error: %d\n", ret);
2708 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2710 struct ena_rss *rss = &ena_dev->rss;
2711 struct ena_admin_get_feat_resp get_resp;
2715 tbl_size = (1ULL << rss->tbl_log_size) *
2716 sizeof(struct ena_admin_rss_ind_table_entry);
2718 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2719 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2720 rss->rss_ind_tbl_dma_addr,
2728 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2729 ind_tbl[i] = rss->host_rss_ind_tbl[i];
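/*
 * Illustrative sketch (not part of the driver flow): a caller would typically
 * spread the indirection table entries round-robin across its RX queues and
 * then push the table to the device. 'tbl_size' and 'num_queues' are
 * hypothetical inputs for the example.
 */
static int __maybe_unused ena_example_fill_indir_table(struct ena_com_dev *ena_dev,
							u16 tbl_size, u16 num_queues)
{
	u16 i;
	int rc;

	if (unlikely(!num_queues))
		return -EINVAL;

	for (i = 0; i < tbl_size; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
		if (unlikely(rc))
			return rc;
	}

	return ena_com_indirect_table_set(ena_dev);
}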
2734 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2738 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2740 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2744 /* The following function might return -EOPNOTSUPP if the
2745 * device doesn't support setting the key / hash function. We can safely
2746 * ignore this error and fall back to indirection table support only.
2748 rc = ena_com_hash_key_allocate(ena_dev);
2750 ena_com_hash_key_fill_default_key(ena_dev);
2751 else if (rc != -EOPNOTSUPP)
2754 rc = ena_com_hash_ctrl_init(ena_dev);
2761 ena_com_hash_key_destroy(ena_dev);
2763 ena_com_indirect_table_destroy(ena_dev);
2769 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2771 ena_com_indirect_table_destroy(ena_dev);
2772 ena_com_hash_key_destroy(ena_dev);
2773 ena_com_hash_ctrl_destroy(ena_dev);
2775 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
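/*
 * Illustrative sketch (not part of the driver flow): RSS setup is a simple
 * init/teardown pair around the probe lifetime, with ena_com_rss_destroy()
 * called on remove. The log2 table size of 7 (128 entries) is an assumption
 * for the example.
 */
static int __maybe_unused ena_example_rss_setup(struct ena_com_dev *ena_dev)
{
	int rc;

	rc = ena_com_rss_init(ena_dev, 7);
	if (unlikely(rc))
		return rc;

	/* ... configure the hash function, hash control and indirection table ... */

	return 0;
}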
2778 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2780 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2782 host_attr->host_info =
2783 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2784 &host_attr->host_info_dma_addr, GFP_KERNEL);
2785 if (unlikely(!host_attr->host_info))
2788 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2789 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2790 (ENA_COMMON_SPEC_VERSION_MINOR));
2795 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2796 u32 debug_area_size)
2798 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2800 host_attr->debug_area_virt_addr =
2801 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2802 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2803 if (unlikely(!host_attr->debug_area_virt_addr)) {
2804 host_attr->debug_area_size = 0;
2808 host_attr->debug_area_size = debug_area_size;
2813 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2815 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2817 if (host_attr->host_info) {
2818 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2819 host_attr->host_info_dma_addr);
2820 host_attr->host_info = NULL;
2824 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2826 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2828 if (host_attr->debug_area_virt_addr) {
2829 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2830 host_attr->debug_area_virt_addr,
2831 host_attr->debug_area_dma_addr);
2832 host_attr->debug_area_virt_addr = NULL;
2836 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2838 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2839 struct ena_com_admin_queue *admin_queue;
2840 struct ena_admin_set_feat_cmd cmd;
2841 struct ena_admin_set_feat_resp resp;
2845 /* Host attribute config is called before ena_com_get_dev_attr_feat,
2846 * so ena_com can't check whether the feature is supported.
2849 memset(&cmd, 0x0, sizeof(cmd));
2850 admin_queue = &ena_dev->admin_queue;
2852 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2853 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2855 ret = ena_com_mem_addr_set(ena_dev,
2856 &cmd.u.host_attr.debug_ba,
2857 host_attr->debug_area_dma_addr);
2858 if (unlikely(ret)) {
2859 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2863 ret = ena_com_mem_addr_set(ena_dev,
2864 &cmd.u.host_attr.os_info_ba,
2865 host_attr->host_info_dma_addr);
2866 if (unlikely(ret)) {
2867 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2871 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2873 ret = ena_com_execute_admin_command(admin_queue,
2874 (struct ena_admin_aq_entry *)&cmd,
2876 (struct ena_admin_acq_entry *)&resp,
2880 netdev_err(ena_dev->net_device,
2881 "Failed to set host attributes: %d\n", ret);
2886 /* Interrupt moderation */
2887 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2889 return ena_com_check_supported_feature_id(ena_dev,
2890 ENA_ADMIN_INTERRUPT_MODERATION);
2893 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2895 u32 intr_delay_resolution,
2896 u32 *intr_moder_interval)
2898 if (!intr_delay_resolution) {
2899 netdev_err(ena_dev->net_device,
2900 "Illegal interrupt delay granularity value\n");
2904 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2909 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2910 u32 tx_coalesce_usecs)
2912 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2914 ena_dev->intr_delay_resolution,
2915 &ena_dev->intr_moder_tx_interval);
2918 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2919 u32 rx_coalesce_usecs)
2921 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2923 ena_dev->intr_delay_resolution,
2924 &ena_dev->intr_moder_rx_interval);
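/*
 * Illustrative sketch (not part of the driver flow): ethtool-style coalescing
 * values in microseconds are translated into device intervals by the helpers
 * above (microseconds divided by the device's interrupt delay resolution).
 */
static int __maybe_unused ena_example_set_coalesce(struct ena_com_dev *ena_dev,
						   u32 tx_usecs, u32 rx_usecs)
{
	int rc;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, tx_usecs);
	if (unlikely(rc))
		return rc;

	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, rx_usecs);
}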
2927 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2929 struct ena_admin_get_feat_resp get_resp;
2930 u16 delay_resolution;
2933 rc = ena_com_get_feature(ena_dev, &get_resp,
2934 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2937 if (rc == -EOPNOTSUPP) {
2938 netdev_dbg(ena_dev->net_device,
2939 "Feature %d isn't supported\n",
2940 ENA_ADMIN_INTERRUPT_MODERATION);
2943 netdev_err(ena_dev->net_device,
2944 "Failed to get interrupt moderation admin cmd. rc: %d\n",
2948 /* No moderation supported; disable adaptive support */
2949 ena_com_disable_adaptive_moderation(ena_dev);
2953 /* Moderation is supported by the device; record its interrupt delay resolution */
2954 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2955 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2957 /* Disable adaptive moderation by default - can be enabled later */
2958 ena_com_disable_adaptive_moderation(ena_dev);
2963 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2965 return ena_dev->intr_moder_tx_interval;
2968 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2970 return ena_dev->intr_moder_rx_interval;
2973 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2974 struct ena_admin_feature_llq_desc *llq_features,
2975 struct ena_llq_configurations *llq_default_cfg)
2977 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2980 if (!llq_features->max_llq_num) {
2981 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2985 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2989 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2990 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2992 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2993 netdev_err(ena_dev->net_device,
2994 "The size of the LLQ entry is smaller than needed\n");
2998 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;