net: ena: explicit casting and initialization, and clearer error handling
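
Initialize spinlock flags before use, make the narrowing conversions from
atomic_read() and ena_dev->dma_addr_bits explicit, drop the MIN_ENA_VER
check in favor of reporting the driver's spec version through the host
info page, fail ena_com_init_io_sq() with -ENOMEM as soon as an
allocation fails, and negotiate the LLQ (Low Latency Queue) configuration
with the device, including bounce buffer setup for the
ENA_ADMIN_PLACEMENT_POLICY_DEV placement policy.

A minimal sketch of the initialization pattern applied throughout; the
lock and queue names match ena_com.c, but the surrounding context is
illustrative only. spin_lock_irqsave() always assigns flags, so the
initializer exists purely to quiet static analysis of the shared ena_com
code:

        unsigned long flags = 0;        /* avoids maybe-uninitialized warnings */

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        /* ... manipulate admin queue state under the lock ... */
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);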
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd0ce43a960ffef0fdbaabc4f1422d..5c468b28723b275c79450279e467a4e1871fdb19 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
 
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-               ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-               | (ENA_COMMON_SPEC_VERSION_MINOR))
 
 #define ENA_CTRL_MAJOR         0
 #define ENA_CTRL_MINOR         0
@@ -61,6 +58,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT        4
+
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
 #define ENA_POLL_MS    5
@@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
        tail_masked = admin_queue->sq.tail & queue_size_mask;
 
        /* In case of queue FULL */
-       cnt = atomic_read(&admin_queue->outstanding_cmds);
+       cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
                pr_debug("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
@@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        struct ena_comp_ctx *comp_ctx;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 
        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
 
-       io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+       io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
@@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                                                    &io_sq->desc_addr.phys_addr,
                                                    GFP_KERNEL);
                }
-       } else {
+
+               if (!io_sq->desc_addr.virt_addr) {
+                       pr_err("memory allocation failed");
+                       return -ENOMEM;
+               }
+       }
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Allocate bounce buffers */
+               io_sq->bounce_buf_ctrl.buffer_size =
+                       ena_dev->llq_info.desc_list_entry_size;
+               io_sq->bounce_buf_ctrl.buffers_num =
+                       ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+               io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+               size = io_sq->bounce_buf_ctrl.buffer_size *
+                        io_sq->bounce_buf_ctrl.buffers_num;
+
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
-               io_sq->desc_addr.virt_addr =
+               io_sq->bounce_buf_ctrl.base_buffer =
                        devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
-               if (!io_sq->desc_addr.virt_addr) {
-                       io_sq->desc_addr.virt_addr =
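+               /* If allocation on the preferred NUMA node failed, retry
+                * without a node preference before giving up.
+                */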
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+               if (!io_sq->bounce_buf_ctrl.base_buffer) {
+                       pr_err("bounce buffer memory allocation failed");
+                       return -ENOMEM;
                }
-       }
 
-       if (!io_sq->desc_addr.virt_addr) {
-               pr_err("memory allocation failed");
-               return -ENOMEM;
+               memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+                      sizeof(io_sq->llq_info));
+
+               /* Initialize the first bounce buffer */
+               io_sq->llq_buf_ctrl.curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, io_sq->llq_info.desc_list_entry_size);
+               io_sq->llq_buf_ctrl.descs_left_in_line =
+                       io_sq->llq_info.descs_num_before_header;
        }
 
        io_sq->tail = 0;
@@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
 
        /* Go over all the completions */
        while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
-                       ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+               ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
@@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
 {
-       unsigned long flags, timeout;
+       unsigned long flags = 0;
+       unsigned long timeout;
        int ret;
 
        timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -557,10 +584,160 @@ err:
        return ret;
 }
 
+/*
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which, in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+       int ret;
+
+       memset(&cmd, 0x0, sizeof(cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+       cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+       cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+       cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+       cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+       return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                                  struct ena_admin_feature_llq_desc *llq_features,
+                                  struct ena_llq_configurations *llq_default_cfg)
+{
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+       u16 supported_feat;
+       int rc;
+
+       memset(llq_info, 0, sizeof(*llq_info));
+
+       supported_feat = llq_features->header_location_ctrl_supported;
+
+       if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+               llq_info->header_location_ctrl =
+                       llq_default_cfg->llq_header_location;
+       } else {
+               pr_err("Invalid header location control, supported: 0x%x\n",
+                      supported_feat);
+               return -EINVAL;
+       }
+
+       if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+               supported_feat = llq_features->descriptors_stride_ctrl_supported;
+               if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+                       llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+               } else {
+                       if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+                               llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+                       } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+                               llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+                       } else {
+                               pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+                                      supported_feat);
+                               return -EINVAL;
+                       }
+
+                       pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                              llq_default_cfg->llq_stride_ctrl, supported_feat,
+                              llq_info->desc_stride_ctrl);
+               }
+       } else {
+               llq_info->desc_stride_ctrl = 0;
+       }
+
+       supported_feat = llq_features->entry_size_ctrl_supported;
+       if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+               llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+               llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+       } else {
+               if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+                       llq_info->desc_list_entry_size = 128;
+               } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+                       llq_info->desc_list_entry_size = 192;
+               } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+                       llq_info->desc_list_entry_size = 256;
+               } else {
+                       pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+                              supported_feat);
+                       return -EINVAL;
+               }
+
+               pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                      llq_default_cfg->llq_ring_entry_size, supported_feat,
+                      llq_info->desc_list_entry_size);
+       }
+       if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+               /* The desc list entry size should be a whole multiple of 8
+                * This requirement comes from __iowrite64_copy()
+                */
+               pr_err("illegal entry size %d\n",
+                      llq_info->desc_list_entry_size);
+               return -EINVAL;
+       }
+
+       if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+               llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+                       sizeof(struct ena_eth_io_tx_desc);
+       else
+               llq_info->descs_per_entry = 1;
+
+       supported_feat = llq_features->desc_num_before_header_supported;
+       if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+               llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+       } else {
+               if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+               } else {
+                       pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+                              supported_feat);
+                       return -EINVAL;
+               }
+
+               pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                      llq_default_cfg->llq_num_decs_before_header,
+                      supported_feat, llq_info->descs_num_before_header);
+       }
+
+       rc = ena_com_set_llq(ena_dev);
+       if (rc)
+               pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+       return rc;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        int ret;
 
        wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret, i;
-       unsigned long flags;
+       unsigned long flags = 0;
        u32 timeout = mmio_read->reg_read_to;
 
        might_sleep();
@@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;
 
-               if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-                       dma_free_coherent(ena_dev->dmadev, size,
-                                         io_sq->desc_addr.virt_addr,
-                                         io_sq->desc_addr.phys_addr);
-               else
-                       devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+               dma_free_coherent(ena_dev->dmadev, size,
+                                 io_sq->desc_addr.virt_addr,
+                                 io_sq->desc_addr.phys_addr);
 
                io_sq->desc_addr.virt_addr = NULL;
        }
+
+       if (io_sq->bounce_buf_ctrl.base_buffer) {
+               devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+               io_sq->bounce_buf_ctrl.base_buffer = NULL;
+       }
 }
 
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-       unsigned long flags;
+       unsigned long flags = 0;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-       unsigned long flags;
+       unsigned long flags = 0;
 
        spin_lock_irqsave(&admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
@@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
        }
 
        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-               pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+               pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
                        get_resp.u.aenq.supported_groups, groups_flag);
                return -EOPNOTSUPP;
        }
@@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
                        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-       if (ver < MIN_ENA_VER) {
-               pr_err("ENA version is lower than the minimal version the driver supports\n");
-               return -1;
-       }
-
        pr_info("ena controller version: %d.%d.%d implementation version %d\n",
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
                                    sizeof(*mmio_read->read_resp),
                                    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
        if (unlikely(!mmio_read->read_resp))
-               return -ENOMEM;
+               goto err;
 
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
 
@@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
        mmio_read->readless_supported = true;
 
        return 0;
+
+err:
+       return -ENOMEM;
 }
 
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1748,6 +1926,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
        else
                return rc;
 
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+       if (!rc)
+               memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+                      sizeof(get_resp.u.llq));
+       else if (rc == -EOPNOTSUPP)
+               memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+       else
+               return rc;
+
        return 0;
 }
 
@@ -1779,6 +1966,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq  = &dev->aenq;
+       unsigned long long timestamp;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;
@@ -1796,10 +1984,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
                 */
                dma_rmb();
 
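+               /* The 64-bit timestamp is delivered split across two
+                * 32-bit fields of the AENQ descriptor.
+                */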
+               timestamp =
+                       (unsigned long long)aenq_common->timestamp_low |
+                       ((unsigned long long)aenq_common->timestamp_high << 32);
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
-                        aenq_common->group, aenq_common->syndrom,
-                        (u64)aenq_common->timestamp_low +
-                                ((u64)aenq_common->timestamp_high << 32));
+                        aenq_common->group, aenq_common->syndrom, timestamp);
 
                /* Handle specific event*/
                handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -2441,6 +2630,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
        if (unlikely(!host_attr->host_info))
                return -ENOMEM;
 
+       host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+               ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+               (ENA_COMMON_SPEC_VERSION_MINOR));
+
        return 0;
 }
 
@@ -2712,3 +2905,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
        intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
 }
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq_features,
+                           struct ena_llq_configurations *llq_default_cfg)
+{
+       int rc;
+       int size;
+
+       if (!llq_features->max_llq_num) {
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               return 0;
+       }
+
+       rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+       if (rc)
+               return rc;
+
+       /* Validate the descriptor is not too big */
+       size = ena_dev->tx_max_header_size;
+       size += ena_dev->llq_info.descs_num_before_header *
+               sizeof(struct ena_eth_io_tx_desc);
+
+       if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+               pr_err("the size of the LLQ entry is smaller than needed\n");
+               return -EINVAL;
+       }
+
+       ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+       return 0;
+}
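
For context, a sketch of how a driver probe path might consume
ena_com_config_dev_mode(). The ena_llq_configurations field names and the
ENA_ADMIN_* constants are taken from this patch, while the helper name
set_default_llq_config(), the llq_config/get_feat_ctx/pdev variables, and
the error label are illustrative assumptions, not code from this diff:

        /* Fill in the preferred LLQ settings; ena_com_config_llq_info()
         * falls back to whatever the device actually supports.
         */
        static void set_default_llq_config(struct ena_llq_configurations *cfg)
        {
                cfg->llq_header_location = ENA_ADMIN_INLINE_HEADER;
                cfg->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
                cfg->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
                cfg->llq_ring_entry_size_value = 128;
                cfg->llq_num_decs_before_header =
                        ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
        }

        /* In probe, after ena_com_get_dev_attr_feat() has filled get_feat_ctx: */
        struct ena_llq_configurations llq_config;
        int rc;

        set_default_llq_config(&llq_config);
        rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_config);
        if (rc) {
                dev_err(&pdev->dev, "failed to configure the placement policy\n");
                goto err;
        }
        /* ena_dev->tx_mem_queue_type now selects HOST or DEV placement */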