net: ena: complete host info to match latest ENA spec
drivers/net/ethernet/amazon/ena/ena_com.c
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "ena_com.h"
34
35 /*****************************************************************************/
36 /*****************************************************************************/
37
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
40
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
43
44
45 #define ENA_CTRL_MAJOR          0
46 #define ENA_CTRL_MINOR          0
47 #define ENA_CTRL_SUB_MINOR      1
48
49 #define MIN_ENA_CTRL_VER \
50         (((ENA_CTRL_MAJOR) << \
51         (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
52         ((ENA_CTRL_MINOR) << \
53         (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
54         (ENA_CTRL_SUB_MINOR))
55
56 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
57 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))
58
59 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
60
61 #define ENA_REGS_ADMIN_INTR_MASK 1
62
63 #define ENA_POLL_MS     5
64
65 /*****************************************************************************/
66 /*****************************************************************************/
67 /*****************************************************************************/
68
69 enum ena_cmd_status {
70         ENA_CMD_SUBMITTED,
71         ENA_CMD_COMPLETED,
72         /* Abort - canceled by the driver */
73         ENA_CMD_ABORTED,
74 };
75
76 struct ena_comp_ctx {
77         struct completion wait_event;
78         struct ena_admin_acq_entry *user_cqe;
79         u32 comp_size;
80         enum ena_cmd_status status;
81         /* status from the device */
82         u8 comp_status;
83         u8 cmd_opcode;
84         bool occupied;
85 };
86
87 struct ena_com_stats_ctx {
88         struct ena_admin_aq_get_stats_cmd get_cmd;
89         struct ena_admin_acq_get_stats_resp get_resp;
90 };
91
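/* Copy a DMA address into the device's common address descriptor after
 * verifying that it fits within the DMA width the device reported.
 */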
92 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
93                                        struct ena_common_mem_addr *ena_addr,
94                                        dma_addr_t addr)
95 {
96         if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
97                 pr_err("dma address has more bits than the device supports\n");
98                 return -EINVAL;
99         }
100
101         ena_addr->mem_addr_low = lower_32_bits(addr);
102         ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
103
104         return 0;
105 }
106
107 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
108 {
109         struct ena_com_admin_sq *sq = &queue->sq;
110         u16 size = ADMIN_SQ_SIZE(queue->q_depth);
111
112         sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
113                                           GFP_KERNEL);
114
115         if (!sq->entries) {
116                 pr_err("memory allocation failed\n");
117                 return -ENOMEM;
118         }
119
120         sq->head = 0;
121         sq->tail = 0;
122         sq->phase = 1;
123
124         sq->db_addr = NULL;
125
126         return 0;
127 }
128
129 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
130 {
131         struct ena_com_admin_cq *cq = &queue->cq;
132         u16 size = ADMIN_CQ_SIZE(queue->q_depth);
133
134         cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
135                                           GFP_KERNEL);
136
137         if (!cq->entries) {
138                 pr_err("memory allocation failed\n");
139                 return -ENOMEM;
140         }
141
142         cq->head = 0;
143         cq->phase = 1;
144
145         return 0;
146 }
147
148 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
149                                    struct ena_aenq_handlers *aenq_handlers)
150 {
151         struct ena_com_aenq *aenq = &dev->aenq;
152         u32 addr_low, addr_high, aenq_caps;
153         u16 size;
154
155         dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
156         size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
157         aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
158                                             GFP_KERNEL);
159
160         if (!aenq->entries) {
161                 pr_err("memory allocation failed\n");
162                 return -ENOMEM;
163         }
164
165         aenq->head = aenq->q_depth;
166         aenq->phase = 1;
167
168         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
169         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
170
171         writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
172         writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
173
174         aenq_caps = 0;
175         aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
176         aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
177                       << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
178                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
179         writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
180
181         if (unlikely(!aenq_handlers)) {
182                 pr_err("aenq handlers pointer is NULL\n");
183                 return -EINVAL;
184         }
185
186         aenq->aenq_handlers = aenq_handlers;
187
188         return 0;
189 }
190
191 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
192                                      struct ena_comp_ctx *comp_ctx)
193 {
194         comp_ctx->occupied = false;
195         atomic_dec(&queue->outstanding_cmds);
196 }
197
198 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
199                                           u16 command_id, bool capture)
200 {
201         if (unlikely(command_id >= queue->q_depth)) {
202                 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
203                        command_id, queue->q_depth);
204                 return NULL;
205         }
206
207         if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
208                 pr_err("Completion context is occupied\n");
209                 return NULL;
210         }
211
212         if (capture) {
213                 atomic_inc(&queue->outstanding_cmds);
214                 queue->comp_ctx[command_id].occupied = true;
215         }
216
217         return &queue->comp_ctx[command_id];
218 }
219
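/* Copy an admin command into the next free SQ entry, set up its completion
 * context and ring the SQ doorbell. Called with the admin queue lock held.
 */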
220 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
221                                                        struct ena_admin_aq_entry *cmd,
222                                                        size_t cmd_size_in_bytes,
223                                                        struct ena_admin_acq_entry *comp,
224                                                        size_t comp_size_in_bytes)
225 {
226         struct ena_comp_ctx *comp_ctx;
227         u16 tail_masked, cmd_id;
228         u16 queue_size_mask;
229         u16 cnt;
230
231         queue_size_mask = admin_queue->q_depth - 1;
232
233         tail_masked = admin_queue->sq.tail & queue_size_mask;
234
235         /* In case of queue FULL */
236         cnt = atomic_read(&admin_queue->outstanding_cmds);
237         if (cnt >= admin_queue->q_depth) {
238                 pr_debug("admin queue is full.\n");
239                 admin_queue->stats.out_of_space++;
240                 return ERR_PTR(-ENOSPC);
241         }
242
243         cmd_id = admin_queue->curr_cmd_id;
244
245         cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
246                 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
247
248         cmd->aq_common_descriptor.command_id |= cmd_id &
249                 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
250
251         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
252         if (unlikely(!comp_ctx))
253                 return ERR_PTR(-EINVAL);
254
255         comp_ctx->status = ENA_CMD_SUBMITTED;
256         comp_ctx->comp_size = (u32)comp_size_in_bytes;
257         comp_ctx->user_cqe = comp;
258         comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
259
260         reinit_completion(&comp_ctx->wait_event);
261
262         memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
263
264         admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
265                 queue_size_mask;
266
267         admin_queue->sq.tail++;
268         admin_queue->stats.submitted_cmd++;
269
270         if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
271                 admin_queue->sq.phase = !admin_queue->sq.phase;
272
273         writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
274
275         return comp_ctx;
276 }
277
278 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
279 {
280         size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
281         struct ena_comp_ctx *comp_ctx;
282         u16 i;
283
284         queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
285         if (unlikely(!queue->comp_ctx)) {
286                 pr_err("memory allocation failed\n");
287                 return -ENOMEM;
288         }
289
290         for (i = 0; i < queue->q_depth; i++) {
291                 comp_ctx = get_comp_ctxt(queue, i, false);
292                 if (comp_ctx)
293                         init_completion(&comp_ctx->wait_event);
294         }
295
296         return 0;
297 }
298
299 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
300                                                      struct ena_admin_aq_entry *cmd,
301                                                      size_t cmd_size_in_bytes,
302                                                      struct ena_admin_acq_entry *comp,
303                                                      size_t comp_size_in_bytes)
304 {
305         unsigned long flags;
306         struct ena_comp_ctx *comp_ctx;
307
308         spin_lock_irqsave(&admin_queue->q_lock, flags);
309         if (unlikely(!admin_queue->running_state)) {
310                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
311                 return ERR_PTR(-ENODEV);
312         }
313         comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
314                                               cmd_size_in_bytes,
315                                               comp,
316                                               comp_size_in_bytes);
317         if (IS_ERR(comp_ctx))
318                 admin_queue->running_state = false;
319         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
320
321         return comp_ctx;
322 }
323
324 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
325                               struct ena_com_create_io_ctx *ctx,
326                               struct ena_com_io_sq *io_sq)
327 {
328         size_t size;
329         int dev_node = 0;
330
331         memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
332
333         io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
334         io_sq->desc_entry_size =
335                 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
336                 sizeof(struct ena_eth_io_tx_desc) :
337                 sizeof(struct ena_eth_io_rx_desc);
338
339         size = io_sq->desc_entry_size * io_sq->q_depth;
340
341         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
342                 dev_node = dev_to_node(ena_dev->dmadev);
343                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
344                 io_sq->desc_addr.virt_addr =
345                         dma_zalloc_coherent(ena_dev->dmadev, size,
346                                             &io_sq->desc_addr.phys_addr,
347                                             GFP_KERNEL);
348                 set_dev_node(ena_dev->dmadev, dev_node);
349                 if (!io_sq->desc_addr.virt_addr) {
350                         io_sq->desc_addr.virt_addr =
351                                 dma_zalloc_coherent(ena_dev->dmadev, size,
352                                                     &io_sq->desc_addr.phys_addr,
353                                                     GFP_KERNEL);
354                 }
355         } else {
356                 dev_node = dev_to_node(ena_dev->dmadev);
357                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
358                 io_sq->desc_addr.virt_addr =
359                         devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
360                 set_dev_node(ena_dev->dmadev, dev_node);
361                 if (!io_sq->desc_addr.virt_addr) {
362                         io_sq->desc_addr.virt_addr =
363                                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
364                 }
365         }
366
367         if (!io_sq->desc_addr.virt_addr) {
368                 pr_err("memory allocation failed\n");
369                 return -ENOMEM;
370         }
371
372         io_sq->tail = 0;
373         io_sq->next_to_comp = 0;
374         io_sq->phase = 1;
375
376         return 0;
377 }
378
379 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
380                               struct ena_com_create_io_ctx *ctx,
381                               struct ena_com_io_cq *io_cq)
382 {
383         size_t size;
384         int prev_node = 0;
385
386         memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
387
388         /* Use the basic completion descriptor for Rx */
389         io_cq->cdesc_entry_size_in_bytes =
390                 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
391                 sizeof(struct ena_eth_io_tx_cdesc) :
392                 sizeof(struct ena_eth_io_rx_cdesc_base);
393
394         size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
395
396         prev_node = dev_to_node(ena_dev->dmadev);
397         set_dev_node(ena_dev->dmadev, ctx->numa_node);
398         io_cq->cdesc_addr.virt_addr =
399                 dma_zalloc_coherent(ena_dev->dmadev, size,
400                                     &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
401         set_dev_node(ena_dev->dmadev, prev_node);
402         if (!io_cq->cdesc_addr.virt_addr) {
403                 io_cq->cdesc_addr.virt_addr =
404                         dma_zalloc_coherent(ena_dev->dmadev, size,
405                                             &io_cq->cdesc_addr.phys_addr,
406                                             GFP_KERNEL);
407         }
408
409         if (!io_cq->cdesc_addr.virt_addr) {
410                 pr_err("memory allocation failed\n");
411                 return -ENOMEM;
412         }
413
414         io_cq->phase = 1;
415         io_cq->head = 0;
416
417         return 0;
418 }
419
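/* Match one completion entry to its pending context, record the device
 * status and, when not in polling mode, wake up the waiting caller.
 */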
420 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
421                                                    struct ena_admin_acq_entry *cqe)
422 {
423         struct ena_comp_ctx *comp_ctx;
424         u16 cmd_id;
425
426         cmd_id = cqe->acq_common_descriptor.command &
427                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
428
429         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
430         if (unlikely(!comp_ctx)) {
431                 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
432                 admin_queue->running_state = false;
433                 return;
434         }
435
436         comp_ctx->status = ENA_CMD_COMPLETED;
437         comp_ctx->comp_status = cqe->acq_common_descriptor.status;
438
439         if (comp_ctx->user_cqe)
440                 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
441
442         if (!admin_queue->polling)
443                 complete(&comp_ctx->wait_event);
444 }
445
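/* Process all admin completions that carry the current phase bit and advance
 * the CQ/SQ heads accordingly.
 */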
446 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
447 {
448         struct ena_admin_acq_entry *cqe = NULL;
449         u16 comp_num = 0;
450         u16 head_masked;
451         u8 phase;
452
453         head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
454         phase = admin_queue->cq.phase;
455
456         cqe = &admin_queue->cq.entries[head_masked];
457
458         /* Go over all the completions */
459         while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
460                         ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
461                 /* Do not read the rest of the completion entry before the
462                  * phase bit has been validated
463                  */
464                 dma_rmb();
465                 ena_com_handle_single_admin_completion(admin_queue, cqe);
466
467                 head_masked++;
468                 comp_num++;
469                 if (unlikely(head_masked == admin_queue->q_depth)) {
470                         head_masked = 0;
471                         phase = !phase;
472                 }
473
474                 cqe = &admin_queue->cq.entries[head_masked];
475         }
476
477         admin_queue->cq.head += comp_num;
478         admin_queue->cq.phase = phase;
479         admin_queue->sq.head += comp_num;
480         admin_queue->stats.completed_cmd += comp_num;
481 }
482
483 static int ena_com_comp_status_to_errno(u8 comp_status)
484 {
485         if (unlikely(comp_status != 0))
486                 pr_err("admin command failed[%u]\n", comp_status);
487
488         if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
489                 return -EINVAL;
490
491         switch (comp_status) {
492         case ENA_ADMIN_SUCCESS:
493                 return 0;
494         case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
495                 return -ENOMEM;
496         case ENA_ADMIN_UNSUPPORTED_OPCODE:
497                 return -EOPNOTSUPP;
498         case ENA_ADMIN_BAD_OPCODE:
499         case ENA_ADMIN_MALFORMED_REQUEST:
500         case ENA_ADMIN_ILLEGAL_PARAMETER:
501         case ENA_ADMIN_UNKNOWN_ERROR:
502                 return -EINVAL;
503         }
504
505         return 0;
506 }
507
508 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
509                                                      struct ena_com_admin_queue *admin_queue)
510 {
511         unsigned long flags, timeout;
512         int ret;
513
514         timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
515
516         while (1) {
517                 spin_lock_irqsave(&admin_queue->q_lock, flags);
518                 ena_com_handle_admin_completion(admin_queue);
519                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
520
521                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
522                         break;
523
524                 if (time_is_before_jiffies(timeout)) {
525                         pr_err("Wait for completion (polling) timeout\n");
526                         /* ENA didn't have any completion */
527                         spin_lock_irqsave(&admin_queue->q_lock, flags);
528                         admin_queue->stats.no_completion++;
529                         admin_queue->running_state = false;
530                         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
531
532                         ret = -ETIME;
533                         goto err;
534                 }
535
536                 msleep(ENA_POLL_MS);
537         }
538
539         if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
540                 pr_err("Command was aborted\n");
541                 spin_lock_irqsave(&admin_queue->q_lock, flags);
542                 admin_queue->stats.aborted_cmd++;
543                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
544                 ret = -ENODEV;
545                 goto err;
546         }
547
548         WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
549              comp_ctx->status);
550
551         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
552 err:
553         comp_ctxt_release(admin_queue, comp_ctx);
554         return ret;
555 }
556
557 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
558                                                         struct ena_com_admin_queue *admin_queue)
559 {
560         unsigned long flags;
561         int ret;
562
563         wait_for_completion_timeout(&comp_ctx->wait_event,
564                                     usecs_to_jiffies(
565                                             admin_queue->completion_timeout));
566
567         /* In case the command wasn't completed, find out the root cause.
568          * There might be 2 kinds of errors:
569          * 1) No completion (timeout reached)
570          * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
571          */
572         if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
573                 spin_lock_irqsave(&admin_queue->q_lock, flags);
574                 ena_com_handle_admin_completion(admin_queue);
575                 admin_queue->stats.no_completion++;
576                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
577
578                 if (comp_ctx->status == ENA_CMD_COMPLETED)
579                         pr_err("The ena device sent a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
580                                comp_ctx->cmd_opcode);
581                 else
582                         pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
583                                comp_ctx->cmd_opcode, comp_ctx->status);
584
585                 admin_queue->running_state = false;
586                 ret = -ETIME;
587                 goto err;
588         }
589
590         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
591 err:
592         comp_ctxt_release(admin_queue, comp_ctx);
593         return ret;
594 }
595
596 /* This method reads a hardware device register by posting a write
597  * and waiting for the response.
598  * On timeout the function will return ENA_MMIO_READ_TIMEOUT
599  */
600 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
601 {
602         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
603         volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
604                 mmio_read->read_resp;
605         u32 mmio_read_reg, ret, i;
606         unsigned long flags;
607         u32 timeout = mmio_read->reg_read_to;
608
609         might_sleep();
610
611         if (timeout == 0)
612                 timeout = ENA_REG_READ_TIMEOUT;
613
614         /* If readless is disabled, perform regular read */
615         if (!mmio_read->readless_supported)
616                 return readl(ena_dev->reg_bar + offset);
617
618         spin_lock_irqsave(&mmio_read->lock, flags);
619         mmio_read->seq_num++;
620
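        /* Seed req_id with a value that cannot equal the new seq_num, so the
         * polling loop below never matches a stale response.
         */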
621         read_resp->req_id = mmio_read->seq_num + 0xDEAD;
622         mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
623                         ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
624         mmio_read_reg |= mmio_read->seq_num &
625                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
626
627         writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
628
629         for (i = 0; i < timeout; i++) {
630                 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
631                         break;
632
633                 udelay(1);
634         }
635
636         if (unlikely(i == timeout)) {
637                 pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
638                        mmio_read->seq_num, offset, read_resp->req_id,
639                        read_resp->reg_off);
640                 ret = ENA_MMIO_READ_TIMEOUT;
641                 goto err;
642         }
643
644         if (read_resp->reg_off != offset) {
645                 pr_err("Read failure: wrong offset provided\n");
646                 ret = ENA_MMIO_READ_TIMEOUT;
647         } else {
648                 ret = read_resp->reg_val;
649         }
650 err:
651         spin_unlock_irqrestore(&mmio_read->lock, flags);
652
653         return ret;
654 }
655
656 /* There are two ways to wait for a completion.
657  * Polling mode - wait until the completion is available.
658  * Async mode - wait on wait queue until the completion is ready
659  * (or the timeout expired).
660  * It is expected that the IRQ handler calls ena_com_handle_admin_completion
661  * to mark the completions.
662  */
663 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
664                                              struct ena_com_admin_queue *admin_queue)
665 {
666         if (admin_queue->polling)
667                 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
668                                                                  admin_queue);
669
670         return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
671                                                             admin_queue);
672 }
673
674 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
675                                  struct ena_com_io_sq *io_sq)
676 {
677         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
678         struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
679         struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
680         u8 direction;
681         int ret;
682
683         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
684
685         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
686                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
687         else
688                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
689
690         destroy_cmd.sq.sq_identity |= (direction <<
691                 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
692                 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
693
694         destroy_cmd.sq.sq_idx = io_sq->idx;
695         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
696
697         ret = ena_com_execute_admin_command(admin_queue,
698                                             (struct ena_admin_aq_entry *)&destroy_cmd,
699                                             sizeof(destroy_cmd),
700                                             (struct ena_admin_acq_entry *)&destroy_resp,
701                                             sizeof(destroy_resp));
702
703         if (unlikely(ret && (ret != -ENODEV)))
704                 pr_err("failed to destroy io sq error: %d\n", ret);
705
706         return ret;
707 }
708
709 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
710                                   struct ena_com_io_sq *io_sq,
711                                   struct ena_com_io_cq *io_cq)
712 {
713         size_t size;
714
715         if (io_cq->cdesc_addr.virt_addr) {
716                 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
717
718                 dma_free_coherent(ena_dev->dmadev, size,
719                                   io_cq->cdesc_addr.virt_addr,
720                                   io_cq->cdesc_addr.phys_addr);
721
722                 io_cq->cdesc_addr.virt_addr = NULL;
723         }
724
725         if (io_sq->desc_addr.virt_addr) {
726                 size = io_sq->desc_entry_size * io_sq->q_depth;
727
728                 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
729                         dma_free_coherent(ena_dev->dmadev, size,
730                                           io_sq->desc_addr.virt_addr,
731                                           io_sq->desc_addr.phys_addr);
732                 else
733                         devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
734
735                 io_sq->desc_addr.virt_addr = NULL;
736         }
737 }
738
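/* Poll the device status register until the reset-in-progress bit reaches
 * exp_state or the timeout (given in units of 100ms) expires.
 */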
739 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
740                                 u16 exp_state)
741 {
742         u32 val, i;
743
744         /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
745         timeout = (timeout * 100) / ENA_POLL_MS;
746
747         for (i = 0; i < timeout; i++) {
748                 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
749
750                 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
751                         pr_err("Reg read timeout occurred\n");
752                         return -ETIME;
753                 }
754
755                 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
756                         exp_state)
757                         return 0;
758
759                 msleep(ENA_POLL_MS);
760         }
761
762         return -ETIME;
763 }
764
765 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
766                                                enum ena_admin_aq_feature_id feature_id)
767 {
768         u32 feature_mask = 1 << feature_id;
769
770         /* Device attributes are always supported */
771         if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
772             !(ena_dev->supported_features & feature_mask))
773                 return false;
774
775         return true;
776 }
777
778 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
779                                   struct ena_admin_get_feat_resp *get_resp,
780                                   enum ena_admin_aq_feature_id feature_id,
781                                   dma_addr_t control_buf_dma_addr,
782                                   u32 control_buff_size)
783 {
784         struct ena_com_admin_queue *admin_queue;
785         struct ena_admin_get_feat_cmd get_cmd;
786         int ret;
787
788         if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
789                 pr_debug("Feature %d isn't supported\n", feature_id);
790                 return -EOPNOTSUPP;
791         }
792
793         memset(&get_cmd, 0x0, sizeof(get_cmd));
794         admin_queue = &ena_dev->admin_queue;
795
796         get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
797
798         if (control_buff_size)
799                 get_cmd.aq_common_descriptor.flags =
800                         ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
801         else
802                 get_cmd.aq_common_descriptor.flags = 0;
803
804         ret = ena_com_mem_addr_set(ena_dev,
805                                    &get_cmd.control_buffer.address,
806                                    control_buf_dma_addr);
807         if (unlikely(ret)) {
808                 pr_err("memory address set failed\n");
809                 return ret;
810         }
811
812         get_cmd.control_buffer.length = control_buff_size;
813
814         get_cmd.feat_common.feature_id = feature_id;
815
816         ret = ena_com_execute_admin_command(admin_queue,
817                                             (struct ena_admin_aq_entry *)
818                                             &get_cmd,
819                                             sizeof(get_cmd),
820                                             (struct ena_admin_acq_entry *)
821                                             get_resp,
822                                             sizeof(*get_resp));
823
824         if (unlikely(ret))
825                 pr_err("Failed to submit get_feature command %d error: %d\n",
826                        feature_id, ret);
827
828         return ret;
829 }
830
831 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
832                                struct ena_admin_get_feat_resp *get_resp,
833                                enum ena_admin_aq_feature_id feature_id)
834 {
835         return ena_com_get_feature_ex(ena_dev,
836                                       get_resp,
837                                       feature_id,
838                                       0,
839                                       0);
840 }
841
842 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
843 {
844         struct ena_rss *rss = &ena_dev->rss;
845
846         rss->hash_key =
847                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
848                                     &rss->hash_key_dma_addr, GFP_KERNEL);
849
850         if (unlikely(!rss->hash_key))
851                 return -ENOMEM;
852
853         return 0;
854 }
855
856 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
857 {
858         struct ena_rss *rss = &ena_dev->rss;
859
860         if (rss->hash_key)
861                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
862                                   rss->hash_key, rss->hash_key_dma_addr);
863         rss->hash_key = NULL;
864 }
865
866 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
867 {
868         struct ena_rss *rss = &ena_dev->rss;
869
870         rss->hash_ctrl =
871                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
872                                     &rss->hash_ctrl_dma_addr, GFP_KERNEL);
873
874         if (unlikely(!rss->hash_ctrl))
875                 return -ENOMEM;
876
877         return 0;
878 }
879
880 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
881 {
882         struct ena_rss *rss = &ena_dev->rss;
883
884         if (rss->hash_ctrl)
885                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
886                                   rss->hash_ctrl, rss->hash_ctrl_dma_addr);
887         rss->hash_ctrl = NULL;
888 }
889
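/* Query the supported indirection table size range and allocate both the
 * DMA-able device table and the host-order shadow table.
 */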
890 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
891                                            u16 log_size)
892 {
893         struct ena_rss *rss = &ena_dev->rss;
894         struct ena_admin_get_feat_resp get_resp;
895         size_t tbl_size;
896         int ret;
897
898         ret = ena_com_get_feature(ena_dev, &get_resp,
899                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
900         if (unlikely(ret))
901                 return ret;
902
903         if ((get_resp.u.ind_table.min_size > log_size) ||
904             (get_resp.u.ind_table.max_size < log_size)) {
905                 pr_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
906                        1 << log_size, 1 << get_resp.u.ind_table.min_size,
907                        1 << get_resp.u.ind_table.max_size);
908                 return -EINVAL;
909         }
910
911         tbl_size = (1ULL << log_size) *
912                 sizeof(struct ena_admin_rss_ind_table_entry);
913
914         rss->rss_ind_tbl =
915                 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
916                                     &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
917         if (unlikely(!rss->rss_ind_tbl))
918                 goto mem_err1;
919
920         tbl_size = (1ULL << log_size) * sizeof(u16);
921         rss->host_rss_ind_tbl =
922                 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
923         if (unlikely(!rss->host_rss_ind_tbl))
924                 goto mem_err2;
925
926         rss->tbl_log_size = log_size;
927
928         return 0;
929
930 mem_err2:
931         tbl_size = (1ULL << log_size) *
932                 sizeof(struct ena_admin_rss_ind_table_entry);
933
934         dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
935                           rss->rss_ind_tbl_dma_addr);
936         rss->rss_ind_tbl = NULL;
937 mem_err1:
938         rss->tbl_log_size = 0;
939         return -ENOMEM;
940 }
941
942 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
943 {
944         struct ena_rss *rss = &ena_dev->rss;
945         size_t tbl_size = (1ULL << rss->tbl_log_size) *
946                 sizeof(struct ena_admin_rss_ind_table_entry);
947
948         if (rss->rss_ind_tbl)
949                 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
950                                   rss->rss_ind_tbl_dma_addr);
951         rss->rss_ind_tbl = NULL;
952
953         if (rss->host_rss_ind_tbl)
954                 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
955         rss->host_rss_ind_tbl = NULL;
956 }
957
958 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
959                                 struct ena_com_io_sq *io_sq, u16 cq_idx)
960 {
961         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
962         struct ena_admin_aq_create_sq_cmd create_cmd;
963         struct ena_admin_acq_create_sq_resp_desc cmd_completion;
964         u8 direction;
965         int ret;
966
967         memset(&create_cmd, 0x0, sizeof(create_cmd));
968
969         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
970
971         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
972                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
973         else
974                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
975
976         create_cmd.sq_identity |= (direction <<
977                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
978                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
979
980         create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
981                 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
982
983         create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
984                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
985                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
986
987         create_cmd.sq_caps_3 |=
988                 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
989
990         create_cmd.cq_idx = cq_idx;
991         create_cmd.sq_depth = io_sq->q_depth;
992
993         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
994                 ret = ena_com_mem_addr_set(ena_dev,
995                                            &create_cmd.sq_ba,
996                                            io_sq->desc_addr.phys_addr);
997                 if (unlikely(ret)) {
998                         pr_err("memory address set failed\n");
999                         return ret;
1000                 }
1001         }
1002
1003         ret = ena_com_execute_admin_command(admin_queue,
1004                                             (struct ena_admin_aq_entry *)&create_cmd,
1005                                             sizeof(create_cmd),
1006                                             (struct ena_admin_acq_entry *)&cmd_completion,
1007                                             sizeof(cmd_completion));
1008         if (unlikely(ret)) {
1009                 pr_err("Failed to create IO SQ. error: %d\n", ret);
1010                 return ret;
1011         }
1012
1013         io_sq->idx = cmd_completion.sq_idx;
1014
1015         io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1016                 (uintptr_t)cmd_completion.sq_doorbell_offset);
1017
1018         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1019                 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1020                                 + cmd_completion.llq_headers_offset);
1021
1022                 io_sq->desc_addr.pbuf_dev_addr =
1023                         (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1024                         cmd_completion.llq_descriptors_offset);
1025         }
1026
1027         pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1028
1029         return ret;
1030 }
1031
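/* Convert the host-side indirection table (queue ids) into device-side
 * entries, validating that each entry points to an existing Rx queue.
 */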
1032 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1033 {
1034         struct ena_rss *rss = &ena_dev->rss;
1035         struct ena_com_io_sq *io_sq;
1036         u16 qid;
1037         int i;
1038
1039         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1040                 qid = rss->host_rss_ind_tbl[i];
1041                 if (qid >= ENA_TOTAL_NUM_QUEUES)
1042                         return -EINVAL;
1043
1044                 io_sq = &ena_dev->io_sq_queues[qid];
1045
1046                 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1047                         return -EINVAL;
1048
1049                 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1050         }
1051
1052         return 0;
1053 }
1054
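/* Rebuild the host-side indirection table by mapping each device-side index
 * back to its host queue id.
 */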
1055 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1056 {
1057         u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1058         struct ena_rss *rss = &ena_dev->rss;
1059         u8 idx;
1060         u16 i;
1061
1062         for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1063                 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1064
1065         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1066                 if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
1067                         return -EINVAL;
1068                 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1069
1070                 if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
1071                         return -EINVAL;
1072
1073                 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1074         }
1075
1076         return 0;
1077 }
1078
1079 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1080 {
1081         size_t size;
1082
1083         size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1084
1085         ena_dev->intr_moder_tbl =
1086                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1087         if (!ena_dev->intr_moder_tbl)
1088                 return -ENOMEM;
1089
1090         ena_com_config_default_interrupt_moderation_table(ena_dev);
1091
1092         return 0;
1093 }
1094
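/* Rescale the interrupt moderation intervals from microseconds to units of
 * the device's interrupt delay resolution.
 */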
1095 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1096                                                  u16 intr_delay_resolution)
1097 {
1098         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1099         unsigned int i;
1100
1101         if (!intr_delay_resolution) {
1102                 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1103                 intr_delay_resolution = 1;
1104         }
1105         ena_dev->intr_delay_resolution = intr_delay_resolution;
1106
1107         /* update Rx */
1108         for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1109                 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1110
1111         /* update Tx */
1112         ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1113 }
1114
1115 /*****************************************************************************/
1116 /*******************************      API       ******************************/
1117 /*****************************************************************************/
1118
1119 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1120                                   struct ena_admin_aq_entry *cmd,
1121                                   size_t cmd_size,
1122                                   struct ena_admin_acq_entry *comp,
1123                                   size_t comp_size)
1124 {
1125         struct ena_comp_ctx *comp_ctx;
1126         int ret;
1127
1128         comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1129                                             comp, comp_size);
1130         if (IS_ERR(comp_ctx)) {
1131                 if (comp_ctx == ERR_PTR(-ENODEV))
1132                         pr_debug("Failed to submit command [%ld]\n",
1133                                  PTR_ERR(comp_ctx));
1134                 else
1135                         pr_err("Failed to submit command [%ld]\n",
1136                                PTR_ERR(comp_ctx));
1137
1138                 return PTR_ERR(comp_ctx);
1139         }
1140
1141         ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1142         if (unlikely(ret)) {
1143                 if (admin_queue->running_state)
1144                         pr_err("Failed to process command. ret = %d\n", ret);
1145                 else
1146                         pr_debug("Failed to process command. ret = %d\n", ret);
1147         }
1148         return ret;
1149 }
1150
1151 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1152                          struct ena_com_io_cq *io_cq)
1153 {
1154         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1155         struct ena_admin_aq_create_cq_cmd create_cmd;
1156         struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1157         int ret;
1158
1159         memset(&create_cmd, 0x0, sizeof(create_cmd));
1160
1161         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1162
1163         create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1164                 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1165         create_cmd.cq_caps_1 |=
1166                 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1167
1168         create_cmd.msix_vector = io_cq->msix_vector;
1169         create_cmd.cq_depth = io_cq->q_depth;
1170
1171         ret = ena_com_mem_addr_set(ena_dev,
1172                                    &create_cmd.cq_ba,
1173                                    io_cq->cdesc_addr.phys_addr);
1174         if (unlikely(ret)) {
1175                 pr_err("memory address set failed\n");
1176                 return ret;
1177         }
1178
1179         ret = ena_com_execute_admin_command(admin_queue,
1180                                             (struct ena_admin_aq_entry *)&create_cmd,
1181                                             sizeof(create_cmd),
1182                                             (struct ena_admin_acq_entry *)&cmd_completion,
1183                                             sizeof(cmd_completion));
1184         if (unlikely(ret)) {
1185                 pr_err("Failed to create IO CQ. error: %d\n", ret);
1186                 return ret;
1187         }
1188
1189         io_cq->idx = cmd_completion.cq_idx;
1190
1191         io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1192                 cmd_completion.cq_interrupt_unmask_register_offset);
1193
1194         if (cmd_completion.cq_head_db_register_offset)
1195                 io_cq->cq_head_db_reg =
1196                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1197                         cmd_completion.cq_head_db_register_offset);
1198
1199         if (cmd_completion.numa_node_register_offset)
1200                 io_cq->numa_node_cfg_reg =
1201                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1202                         cmd_completion.numa_node_register_offset);
1203
1204         pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1205
1206         return ret;
1207 }
1208
1209 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1210                             struct ena_com_io_sq **io_sq,
1211                             struct ena_com_io_cq **io_cq)
1212 {
1213         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1214                 pr_err("Invalid queue number %d but the max is %d\n", qid,
1215                        ENA_TOTAL_NUM_QUEUES);
1216                 return -EINVAL;
1217         }
1218
1219         *io_sq = &ena_dev->io_sq_queues[qid];
1220         *io_cq = &ena_dev->io_cq_queues[qid];
1221
1222         return 0;
1223 }
1224
1225 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1226 {
1227         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1228         struct ena_comp_ctx *comp_ctx;
1229         u16 i;
1230
1231         if (!admin_queue->comp_ctx)
1232                 return;
1233
1234         for (i = 0; i < admin_queue->q_depth; i++) {
1235                 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1236                 if (unlikely(!comp_ctx))
1237                         break;
1238
1239                 comp_ctx->status = ENA_CMD_ABORTED;
1240
1241                 complete(&comp_ctx->wait_event);
1242         }
1243 }
1244
1245 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1246 {
1247         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1248         unsigned long flags;
1249
1250         spin_lock_irqsave(&admin_queue->q_lock, flags);
1251         while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1252                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1253                 msleep(ENA_POLL_MS);
1254                 spin_lock_irqsave(&admin_queue->q_lock, flags);
1255         }
1256         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1257 }
1258
1259 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1260                           struct ena_com_io_cq *io_cq)
1261 {
1262         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1263         struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1264         struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1265         int ret;
1266
1267         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1268
1269         destroy_cmd.cq_idx = io_cq->idx;
1270         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1271
1272         ret = ena_com_execute_admin_command(admin_queue,
1273                                             (struct ena_admin_aq_entry *)&destroy_cmd,
1274                                             sizeof(destroy_cmd),
1275                                             (struct ena_admin_acq_entry *)&destroy_resp,
1276                                             sizeof(destroy_resp));
1277
1278         if (unlikely(ret && (ret != -ENODEV)))
1279                 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1280
1281         return ret;
1282 }
1283
1284 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1285 {
1286         return ena_dev->admin_queue.running_state;
1287 }
1288
1289 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1290 {
1291         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1292         unsigned long flags;
1293
1294         spin_lock_irqsave(&admin_queue->q_lock, flags);
1295         ena_dev->admin_queue.running_state = state;
1296         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1297 }
1298
1299 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1300 {
1301         u16 depth = ena_dev->aenq.q_depth;
1302
1303         WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1304
1305         /* Init head_db to mark that all entries in the queue
1306          * are initially available
1307          */
1308         writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1309 }
1310
1311 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1312 {
1313         struct ena_com_admin_queue *admin_queue;
1314         struct ena_admin_set_feat_cmd cmd;
1315         struct ena_admin_set_feat_resp resp;
1316         struct ena_admin_get_feat_resp get_resp;
1317         int ret;
1318
1319         ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1320         if (ret) {
1321                 pr_info("Can't get aenq configuration\n");
1322                 return ret;
1323         }
1324
1325         if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1326                 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1327                         get_resp.u.aenq.supported_groups, groups_flag);
1328                 return -EOPNOTSUPP;
1329         }
1330
1331         memset(&cmd, 0x0, sizeof(cmd));
1332         admin_queue = &ena_dev->admin_queue;
1333
1334         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1335         cmd.aq_common_descriptor.flags = 0;
1336         cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1337         cmd.u.aenq.enabled_groups = groups_flag;
1338
1339         ret = ena_com_execute_admin_command(admin_queue,
1340                                             (struct ena_admin_aq_entry *)&cmd,
1341                                             sizeof(cmd),
1342                                             (struct ena_admin_acq_entry *)&resp,
1343                                             sizeof(resp));
1344
1345         if (unlikely(ret))
1346                 pr_err("Failed to config AENQ ret: %d\n", ret);
1347
1348         return ret;
1349 }
1350
1351 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1352 {
1353         u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1354         int width;
1355
1356         if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1357                 pr_err("Reg read timeout occurred\n");
1358                 return -ETIME;
1359         }
1360
1361         width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1362                 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1363
1364         pr_debug("ENA dma width: %d\n", width);
1365
1366         if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1367                 pr_err("DMA width illegal value: %d\n", width);
1368                 return -EINVAL;
1369         }
1370
1371         ena_dev->dma_addr_bits = width;
1372
1373         return width;
1374 }
1375
1376 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1377 {
1378         u32 ver;
1379         u32 ctrl_ver;
1380         u32 ctrl_ver_masked;
1381
1382         /* Make sure the ENA version and the controller version are at least
1383          * the versions the driver expects
1384          */
1385         ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1386         ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1387                                           ENA_REGS_CONTROLLER_VERSION_OFF);
1388
1389         if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1390                      (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1391                 pr_err("Reg read timeout occurred\n");
1392                 return -ETIME;
1393         }
1394
1395         pr_info("ena device version: %d.%d\n",
1396                 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1397                         ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1398                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1399
1400         pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1401                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1402                         ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1403                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1404                         ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1405                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1406                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1407                         ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1408
1409         ctrl_ver_masked =
1410                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1411                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1412                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1413
1414         /* Validate the ctrl version without the implementation ID */
1415         if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1416                 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1417                 return -1;
1418         }
1419
1420         return 0;
1421 }
1422
1423 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1424 {
1425         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1426         struct ena_com_admin_cq *cq = &admin_queue->cq;
1427         struct ena_com_admin_sq *sq = &admin_queue->sq;
1428         struct ena_com_aenq *aenq = &ena_dev->aenq;
1429         u16 size;
1430
1431         if (admin_queue->comp_ctx)
1432                 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1433         admin_queue->comp_ctx = NULL;
1434         size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1435         if (sq->entries)
1436                 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1437                                   sq->dma_addr);
1438         sq->entries = NULL;
1439
1440         size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1441         if (cq->entries)
1442                 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1443                                   cq->dma_addr);
1444         cq->entries = NULL;
1445
1446         size = ADMIN_AENQ_SIZE(aenq->q_depth);
1447         if (ena_dev->aenq.entries)
1448                 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1449                                   aenq->dma_addr);
1450         aenq->entries = NULL;
1451 }
1452
1453 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1454 {
1455         u32 mask_value = 0;
1456
1457         if (polling)
1458                 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1459
1460         writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1461         ena_dev->admin_queue.polling = polling;
1462 }
1463
1464 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1465 {
1466         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1467
1468         spin_lock_init(&mmio_read->lock);
1469         mmio_read->read_resp =
1470                 dma_zalloc_coherent(ena_dev->dmadev,
1471                                     sizeof(*mmio_read->read_resp),
1472                                     &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1473         if (unlikely(!mmio_read->read_resp))
1474                 return -ENOMEM;
1475
1476         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1477
1478         mmio_read->read_resp->req_id = 0x0;
1479         mmio_read->seq_num = 0x0;
1480         mmio_read->readless_supported = true;
1481
1482         return 0;
1483 }
1484
1485 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1486 {
1487         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1488
1489         mmio_read->readless_supported = readless_supported;
1490 }
1491
1492 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1493 {
1494         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1495
1496         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1497         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1498
1499         dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1500                           mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1501
1502         mmio_read->read_resp = NULL;
1503 }
1504
1505 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1506 {
1507         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1508         u32 addr_low, addr_high;
1509
1510         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1511         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1512
1513         writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1514         writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1515 }
1516
1517 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1518                        struct ena_aenq_handlers *aenq_handlers,
1519                        bool init_spinlock)
1520 {
1521         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1522         u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1523         int ret;
1524
1525         dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1526
1527         if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1528                 pr_err("Reg read timeout occurred\n");
1529                 return -ETIME;
1530         }
1531
1532         if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1533                 pr_err("Device isn't ready, abort com init\n");
1534                 return -ENODEV;
1535         }
1536
1537         admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1538
1539         admin_queue->q_dmadev = ena_dev->dmadev;
1540         admin_queue->polling = false;
1541         admin_queue->curr_cmd_id = 0;
1542
1543         atomic_set(&admin_queue->outstanding_cmds, 0);
1544
1545         if (init_spinlock)
1546                 spin_lock_init(&admin_queue->q_lock);
1547
1548         ret = ena_com_init_comp_ctxt(admin_queue);
1549         if (ret)
1550                 goto error;
1551
1552         ret = ena_com_admin_init_sq(admin_queue);
1553         if (ret)
1554                 goto error;
1555
1556         ret = ena_com_admin_init_cq(admin_queue);
1557         if (ret)
1558                 goto error;
1559
1560         admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1561                 ENA_REGS_AQ_DB_OFF);
1562
1563         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1564         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1565
1566         writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1567         writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1568
1569         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1570         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1571
1572         writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1573         writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1574
1575         aq_caps = 0;
1576         aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1577         aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1578                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1579                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1580
1581         acq_caps = 0;
1582         acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1583         acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1584                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1585                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1586
1587         writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1588         writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1589         ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1590         if (ret)
1591                 goto error;
1592
1593         admin_queue->running_state = true;
1594
1595         return 0;
1596 error:
1597         ena_com_admin_destroy(ena_dev);
1598
1599         return ret;
1600 }
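/*
 * Typical bring-up order, sketched for illustration only (error handling and
 * the surrounding driver context are omitted; aenq_handlers is a caller
 * supplied struct ena_aenq_handlers instance):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 */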
1601
1602 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1603                             struct ena_com_create_io_ctx *ctx)
1604 {
1605         struct ena_com_io_sq *io_sq;
1606         struct ena_com_io_cq *io_cq;
1607         int ret;
1608
1609         if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1610                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1611                        ctx->qid, ENA_TOTAL_NUM_QUEUES);
1612                 return -EINVAL;
1613         }
1614
1615         io_sq = &ena_dev->io_sq_queues[ctx->qid];
1616         io_cq = &ena_dev->io_cq_queues[ctx->qid];
1617
1618         memset(io_sq, 0x0, sizeof(*io_sq));
1619         memset(io_cq, 0x0, sizeof(*io_cq));
1620
1621         /* Init CQ */
1622         io_cq->q_depth = ctx->queue_size;
1623         io_cq->direction = ctx->direction;
1624         io_cq->qid = ctx->qid;
1625
1626         io_cq->msix_vector = ctx->msix_vector;
1627
1628         io_sq->q_depth = ctx->queue_size;
1629         io_sq->direction = ctx->direction;
1630         io_sq->qid = ctx->qid;
1631
1632         io_sq->mem_queue_type = ctx->mem_queue_type;
1633
1634         if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1635                 /* header length is limited to 8 bits */
1636                 io_sq->tx_max_header_size =
1637                         min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1638
1639         ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1640         if (ret)
1641                 goto error;
1642         ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1643         if (ret)
1644                 goto error;
1645
1646         ret = ena_com_create_io_cq(ena_dev, io_cq);
1647         if (ret)
1648                 goto error;
1649
1650         ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1651         if (ret)
1652                 goto destroy_io_cq;
1653
1654         return 0;
1655
1656 destroy_io_cq:
1657         ena_com_destroy_io_cq(ena_dev, io_cq);
1658 error:
1659         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1660         return ret;
1661 }
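/*
 * Hypothetical usage sketch; the field values below are placeholders, not
 * recommended defaults:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= qid,
 *		.queue_size	= 1024,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= msix_vector,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	...
 *	ena_com_destroy_io_queue(ena_dev, ctx.qid);
 */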
1662
1663 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1664 {
1665         struct ena_com_io_sq *io_sq;
1666         struct ena_com_io_cq *io_cq;
1667
1668         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1669                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1670                        ENA_TOTAL_NUM_QUEUES);
1671                 return;
1672         }
1673
1674         io_sq = &ena_dev->io_sq_queues[qid];
1675         io_cq = &ena_dev->io_cq_queues[qid];
1676
1677         ena_com_destroy_io_sq(ena_dev, io_sq);
1678         ena_com_destroy_io_cq(ena_dev, io_cq);
1679
1680         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1681 }
1682
1683 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1684                             struct ena_admin_get_feat_resp *resp)
1685 {
1686         return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1687 }
1688
1689 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1690                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
1691 {
1692         struct ena_admin_get_feat_resp get_resp;
1693         int rc;
1694
1695         rc = ena_com_get_feature(ena_dev, &get_resp,
1696                                  ENA_ADMIN_DEVICE_ATTRIBUTES);
1697         if (rc)
1698                 return rc;
1699
1700         memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1701                sizeof(get_resp.u.dev_attr));
1702         ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1703
1704         rc = ena_com_get_feature(ena_dev, &get_resp,
1705                                  ENA_ADMIN_MAX_QUEUES_NUM);
1706         if (rc)
1707                 return rc;
1708
1709         memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1710                sizeof(get_resp.u.max_queue));
1711         ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1712
1713         rc = ena_com_get_feature(ena_dev, &get_resp,
1714                                  ENA_ADMIN_AENQ_CONFIG);
1715         if (rc)
1716                 return rc;
1717
1718         memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1719                sizeof(get_resp.u.aenq));
1720
1721         rc = ena_com_get_feature(ena_dev, &get_resp,
1722                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1723         if (rc)
1724                 return rc;
1725
1726         memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1727                sizeof(get_resp.u.offload));
1728
1729         /* Driver hints is not a mandatory admin command, so in case the
1730          * command isn't supported, set the driver hints to 0.
1731          */
1732         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1733
1734         if (!rc)
1735                 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1736                        sizeof(get_resp.u.hw_hints));
1737         else if (rc == -EOPNOTSUPP)
1738                 memset(&get_feat_ctx->hw_hints, 0x0,
1739                        sizeof(get_feat_ctx->hw_hints));
1740         else
1741                 return rc;
1742
1743         return 0;
1744 }
1745
1746 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1747 {
1748         ena_com_handle_admin_completion(&ena_dev->admin_queue);
1749 }
1750
1751 /* ena_handle_specific_aenq_event:
1752  * return the handler that is relevant to the specific event group
1753  */
1754 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1755                                                      u16 group)
1756 {
1757         struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1758
1759         if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1760                 return aenq_handlers->handlers[group];
1761
1762         return aenq_handlers->unimplemented_handler;
1763 }
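/*
 * Sketch of an AENQ handlers table a driver might pass to
 * ena_com_admin_init(); the callback names here are placeholders:
 *
 *	static void my_link_change_cb(void *data,
 *				      struct ena_admin_aenq_entry *aenq_e)
 *	{ ... }
 *
 *	static void my_unimplemented_cb(void *data,
 *					struct ena_admin_aenq_entry *aenq_e)
 *	{ ... }
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 */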
1764
1765 /* ena_aenq_intr_handler:
1766  * handles the aenq incoming events.
1767  * pop events from the queue and apply the specific handler
1768  */
1769 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1770 {
1771         struct ena_admin_aenq_entry *aenq_e;
1772         struct ena_admin_aenq_common_desc *aenq_common;
1773         struct ena_com_aenq *aenq  = &dev->aenq;
1774         ena_aenq_handler handler_cb;
1775         u16 masked_head, processed = 0;
1776         u8 phase;
1777
1778         masked_head = aenq->head & (aenq->q_depth - 1);
1779         phase = aenq->phase;
1780         aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1781         aenq_common = &aenq_e->aenq_common_desc;
1782
1783         /* Go over all the events */
1784         while ((READ_ONCE(aenq_common->flags) &
1785                 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1786                 /* Make sure the phase bit (ownership) is as expected before
1787                  * reading the rest of the descriptor.
1788                  */
1789                 dma_rmb();
1790
1791                 pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
1792                          aenq_common->group, aenq_common->syndrom,
1793                          (u64)aenq_common->timestamp_low +
1794                                  ((u64)aenq_common->timestamp_high << 32));
1795
1796                 /* Handle specific event */
1797                 handler_cb = ena_com_get_specific_aenq_cb(dev,
1798                                                           aenq_common->group);
1799                 handler_cb(data, aenq_e); /* call the actual event handler */
1800
1801                 /* Get next event entry */
1802                 masked_head++;
1803                 processed++;
1804
1805                 if (unlikely(masked_head == aenq->q_depth)) {
1806                         masked_head = 0;
1807                         phase = !phase;
1808                 }
1809                 aenq_e = &aenq->entries[masked_head];
1810                 aenq_common = &aenq_e->aenq_common_desc;
1811         }
1812
1813         aenq->head += processed;
1814         aenq->phase = phase;
1815
1816         /* Don't update aenq doorbell if there weren't any processed events */
1817         if (!processed)
1818                 return;
1819
1820         /* write the aenq doorbell after all AENQ descriptors were read */
1821         mb();
1822         writel_relaxed((u32)aenq->head,
1823                        dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1824         mmiowb();
1825 }
1826
1827 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
1828                       enum ena_regs_reset_reason_types reset_reason)
1829 {
1830         u32 stat, timeout, cap, reset_val;
1831         int rc;
1832
1833         stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1834         cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1835
1836         if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
1837                      (cap == ENA_MMIO_READ_TIMEOUT))) {
1838                 pr_err("Reg read32 timeout occurred\n");
1839                 return -ETIME;
1840         }
1841
1842         if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
1843                 pr_err("Device isn't ready, can't reset device\n");
1844                 return -EINVAL;
1845         }
1846
1847         timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
1848                         ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
1849         if (timeout == 0) {
1850                 pr_err("Invalid timeout value\n");
1851                 return -EINVAL;
1852         }
1853
1854         /* start reset */
1855         reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
1856         reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
1857                      ENA_REGS_DEV_CTL_RESET_REASON_MASK;
1858         writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1859
1860         /* Write again the MMIO read request address */
1861         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1862
1863         rc = wait_for_reset_state(ena_dev, timeout,
1864                                   ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
1865         if (rc != 0) {
1866                 pr_err("Reset indication didn't turn on\n");
1867                 return rc;
1868         }
1869
1870         /* reset done */
1871         writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1872         rc = wait_for_reset_state(ena_dev, timeout, 0);
1873         if (rc != 0) {
1874                 pr_err("Reset indication didn't turn off\n");
1875                 return rc;
1876         }
1877
1878         timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
1879                 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
1880         if (timeout)
1881                 /* the resolution of timeout reg is 100ms */
1882                 ena_dev->admin_queue.completion_timeout = timeout * 100000;
1883         else
1884                 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
1885
1886         return 0;
1887 }
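/*
 * Illustrative call only; the reset reason shown is one of the
 * ena_regs_reset_reason_types values and should match the actual trigger:
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 */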
1888
1889 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1890                              struct ena_com_stats_ctx *ctx,
1891                              enum ena_admin_get_stats_type type)
1892 {
1893         struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1894         struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1895         struct ena_com_admin_queue *admin_queue;
1896         int ret;
1897
1898         admin_queue = &ena_dev->admin_queue;
1899
1900         get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1901         get_cmd->aq_common_descriptor.flags = 0;
1902         get_cmd->type = type;
1903
1904         ret = ena_com_execute_admin_command(admin_queue,
1905                                              (struct ena_admin_aq_entry *)get_cmd,
1906                                              sizeof(*get_cmd),
1907                                              (struct ena_admin_acq_entry *)get_resp,
1908                                              sizeof(*get_resp));
1909
1910         if (unlikely(ret))
1911                 pr_err("Failed to get stats. error: %d\n", ret);
1912
1913         return ret;
1914 }
1915
1916 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1917                                 struct ena_admin_basic_stats *stats)
1918 {
1919         struct ena_com_stats_ctx ctx;
1920         int ret;
1921
1922         memset(&ctx, 0x0, sizeof(ctx));
1923         ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1924         if (likely(ret == 0))
1925                 memcpy(stats, &ctx.get_resp.basic_stats,
1926                        sizeof(ctx.get_resp.basic_stats));
1927
1928         return ret;
1929 }
1930
1931 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1932 {
1933         struct ena_com_admin_queue *admin_queue;
1934         struct ena_admin_set_feat_cmd cmd;
1935         struct ena_admin_set_feat_resp resp;
1936         int ret;
1937
1938         if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
1939                 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
1940                 return -EOPNOTSUPP;
1941         }
1942
1943         memset(&cmd, 0x0, sizeof(cmd));
1944         admin_queue = &ena_dev->admin_queue;
1945
1946         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1947         cmd.aq_common_descriptor.flags = 0;
1948         cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1949         cmd.u.mtu.mtu = mtu;
1950
1951         ret = ena_com_execute_admin_command(admin_queue,
1952                                             (struct ena_admin_aq_entry *)&cmd,
1953                                             sizeof(cmd),
1954                                             (struct ena_admin_acq_entry *)&resp,
1955                                             sizeof(resp));
1956
1957         if (unlikely(ret))
1958                 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
1959
1960         return ret;
1961 }
1962
1963 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
1964                                  struct ena_admin_feature_offload_desc *offload)
1965 {
1966         int ret;
1967         struct ena_admin_get_feat_resp resp;
1968
1969         ret = ena_com_get_feature(ena_dev, &resp,
1970                                   ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1971         if (unlikely(ret)) {
1972                 pr_err("Failed to get offload capabilities %d\n", ret);
1973                 return ret;
1974         }
1975
1976         memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
1977
1978         return 0;
1979 }
1980
1981 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
1982 {
1983         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1984         struct ena_rss *rss = &ena_dev->rss;
1985         struct ena_admin_set_feat_cmd cmd;
1986         struct ena_admin_set_feat_resp resp;
1987         struct ena_admin_get_feat_resp get_resp;
1988         int ret;
1989
1990         if (!ena_com_check_supported_feature_id(ena_dev,
1991                                                 ENA_ADMIN_RSS_HASH_FUNCTION)) {
1992                 pr_debug("Feature %d isn't supported\n",
1993                          ENA_ADMIN_RSS_HASH_FUNCTION);
1994                 return -EOPNOTSUPP;
1995         }
1996
1997         /* Validate hash function is supported */
1998         ret = ena_com_get_feature(ena_dev, &get_resp,
1999                                   ENA_ADMIN_RSS_HASH_FUNCTION);
2000         if (unlikely(ret))
2001                 return ret;
2002
2003         if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2004                 pr_err("Func hash %d isn't supported by device, abort\n",
2005                        rss->hash_func);
2006                 return -EOPNOTSUPP;
2007         }
2008
2009         memset(&cmd, 0x0, sizeof(cmd));
2010
2011         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2012         cmd.aq_common_descriptor.flags =
2013                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2014         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2015         cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2016         cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2017
2018         ret = ena_com_mem_addr_set(ena_dev,
2019                                    &cmd.control_buffer.address,
2020                                    rss->hash_key_dma_addr);
2021         if (unlikely(ret)) {
2022                 pr_err("memory address set failed\n");
2023                 return ret;
2024         }
2025
2026         cmd.control_buffer.length = sizeof(*rss->hash_key);
2027
2028         ret = ena_com_execute_admin_command(admin_queue,
2029                                             (struct ena_admin_aq_entry *)&cmd,
2030                                             sizeof(cmd),
2031                                             (struct ena_admin_acq_entry *)&resp,
2032                                             sizeof(resp));
2033         if (unlikely(ret)) {
2034                 pr_err("Failed to set hash function %d. error: %d\n",
2035                        rss->hash_func, ret);
2036                 return -EINVAL;
2037         }
2038
2039         return 0;
2040 }
2041
2042 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2043                                enum ena_admin_hash_functions func,
2044                                const u8 *key, u16 key_len, u32 init_val)
2045 {
2046         struct ena_rss *rss = &ena_dev->rss;
2047         struct ena_admin_get_feat_resp get_resp;
2048         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2049                 rss->hash_key;
2050         int rc;
2051
2052         /* Make sure the key size is a multiple of DWORDs */
2053         if (unlikely(key_len & 0x3))
2054                 return -EINVAL;
2055
2056         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2057                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2058                                     rss->hash_key_dma_addr,
2059                                     sizeof(*rss->hash_key));
2060         if (unlikely(rc))
2061                 return rc;
2062
2063         if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2064                 pr_err("Flow hash function %d isn't supported\n", func);
2065                 return -EOPNOTSUPP;
2066         }
2067
2068         switch (func) {
2069         case ENA_ADMIN_TOEPLITZ:
2070                 if (key_len > sizeof(hash_key->key)) {
2071                         pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2072                                key_len, sizeof(hash_key->key));
2073                         return -EINVAL;
2074                 }
2075
2076                 memcpy(hash_key->key, key, key_len);
2077                 rss->hash_init_val = init_val;
2078                 hash_key->keys_num = key_len >> 2;
2079                 break;
2080         case ENA_ADMIN_CRC32:
2081                 rss->hash_init_val = init_val;
2082                 break;
2083         default:
2084                 pr_err("Invalid hash function (%d)\n", func);
2085                 return -EINVAL;
2086         }
2087
2088         rc = ena_com_set_hash_function(ena_dev);
2089
2090         /* Restore the old function */
2091         if (unlikely(rc))
2092                 ena_com_get_hash_function(ena_dev, NULL, NULL);
2093
2094         return rc;
2095 }
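/*
 * Hedged usage sketch: the 40-byte key size and the init value are
 * placeholders; key_len must be a multiple of 4 and must not exceed the
 * device hash key size:
 *
 *	u8 toeplitz_key[40];
 *
 *	get_random_bytes(toeplitz_key, sizeof(toeplitz_key));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key),
 *					0xFFFFFFFF);
 */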
2096
2097 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2098                               enum ena_admin_hash_functions *func,
2099                               u8 *key)
2100 {
2101         struct ena_rss *rss = &ena_dev->rss;
2102         struct ena_admin_get_feat_resp get_resp;
2103         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2104                 rss->hash_key;
2105         int rc;
2106
2107         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2108                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2109                                     rss->hash_key_dma_addr,
2110                                     sizeof(*rss->hash_key));
2111         if (unlikely(rc))
2112                 return rc;
2113
2114         rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2115         if (func)
2116                 *func = rss->hash_func;
2117
2118         if (key)
2119                 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2120
2121         return 0;
2122 }
2123
2124 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2125                           enum ena_admin_flow_hash_proto proto,
2126                           u16 *fields)
2127 {
2128         struct ena_rss *rss = &ena_dev->rss;
2129         struct ena_admin_get_feat_resp get_resp;
2130         int rc;
2131
2132         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2133                                     ENA_ADMIN_RSS_HASH_INPUT,
2134                                     rss->hash_ctrl_dma_addr,
2135                                     sizeof(*rss->hash_ctrl));
2136         if (unlikely(rc))
2137                 return rc;
2138
2139         if (fields)
2140                 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2141
2142         return 0;
2143 }
2144
2145 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2146 {
2147         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2148         struct ena_rss *rss = &ena_dev->rss;
2149         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2150         struct ena_admin_set_feat_cmd cmd;
2151         struct ena_admin_set_feat_resp resp;
2152         int ret;
2153
2154         if (!ena_com_check_supported_feature_id(ena_dev,
2155                                                 ENA_ADMIN_RSS_HASH_INPUT)) {
2156                 pr_debug("Feature %d isn't supported\n",
2157                          ENA_ADMIN_RSS_HASH_INPUT);
2158                 return -EOPNOTSUPP;
2159         }
2160
2161         memset(&cmd, 0x0, sizeof(cmd));
2162
2163         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2164         cmd.aq_common_descriptor.flags =
2165                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2166         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2167         cmd.u.flow_hash_input.enabled_input_sort =
2168                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2169                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2170
2171         ret = ena_com_mem_addr_set(ena_dev,
2172                                    &cmd.control_buffer.address,
2173                                    rss->hash_ctrl_dma_addr);
2174         if (unlikely(ret)) {
2175                 pr_err("memory address set failed\n");
2176                 return ret;
2177         }
2178         cmd.control_buffer.length = sizeof(*hash_ctrl);
2179
2180         ret = ena_com_execute_admin_command(admin_queue,
2181                                             (struct ena_admin_aq_entry *)&cmd,
2182                                             sizeof(cmd),
2183                                             (struct ena_admin_acq_entry *)&resp,
2184                                             sizeof(resp));
2185         if (unlikely(ret))
2186                 pr_err("Failed to set hash input. error: %d\n", ret);
2187
2188         return ret;
2189 }
2190
2191 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2192 {
2193         struct ena_rss *rss = &ena_dev->rss;
2194         struct ena_admin_feature_rss_hash_control *hash_ctrl =
2195                 rss->hash_ctrl;
2196         u16 available_fields = 0;
2197         int rc, i;
2198
2199         /* Get the supported hash input */
2200         rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2201         if (unlikely(rc))
2202                 return rc;
2203
2204         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2205                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2206                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2207
2208         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2209                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2210                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2211
2212         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2213                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2214                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2215
2216         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2217                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2218                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2219
2220         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2221                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2222
2223         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2224                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2225
2226         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2227                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2228
2229         hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2230                 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2231
2232         for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2233                 available_fields = hash_ctrl->selected_fields[i].fields &
2234                                 hash_ctrl->supported_fields[i].fields;
2235                 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2236                         pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2237                                i, hash_ctrl->supported_fields[i].fields,
2238                                hash_ctrl->selected_fields[i].fields);
2239                         return -EOPNOTSUPP;
2240                 }
2241         }
2242
2243         rc = ena_com_set_hash_ctrl(ena_dev);
2244
2245         /* In case of failure, restore the old hash ctrl */
2246         if (unlikely(rc))
2247                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2248
2249         return rc;
2250 }
2251
2252 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2253                            enum ena_admin_flow_hash_proto proto,
2254                            u16 hash_fields)
2255 {
2256         struct ena_rss *rss = &ena_dev->rss;
2257         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2258         u16 supported_fields;
2259         int rc;
2260
2261         if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2262                 pr_err("Invalid proto num (%u)\n", proto);
2263                 return -EINVAL;
2264         }
2265
2266         /* Get the ctrl table */
2267         rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2268         if (unlikely(rc))
2269                 return rc;
2270
2271         /* Make sure all the fields are supported */
2272         supported_fields = hash_ctrl->supported_fields[proto].fields;
2273         if ((hash_fields & supported_fields) != hash_fields) {
2274                 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2275                        proto, hash_fields, supported_fields);
2276         }
2277
2278         hash_ctrl->selected_fields[proto].fields = hash_fields;
2279
2280         rc = ena_com_set_hash_ctrl(ena_dev);
2281
2282         /* In case of failure, restore the old hash ctrl */
2283         if (unlikely(rc))
2284                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2285
2286         return rc;
2287 }
2288
2289 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2290                                       u16 entry_idx, u16 entry_value)
2291 {
2292         struct ena_rss *rss = &ena_dev->rss;
2293
2294         if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2295                 return -EINVAL;
2296
2297         if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2298                 return -EINVAL;
2299
2300         rss->host_rss_ind_tbl[entry_idx] = entry_value;
2301
2302         return 0;
2303 }
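/*
 * Sketch of programming the RSS indirection table; the spreading policy
 * (round-robin over num_io_queues) is a placeholder chosen by the caller:
 *
 *	for (i = 0; i < (1 << rss_tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_io_queues);
 *		if (unlikely(rc))
 *			break;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */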
2304
2305 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2306 {
2307         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2308         struct ena_rss *rss = &ena_dev->rss;
2309         struct ena_admin_set_feat_cmd cmd;
2310         struct ena_admin_set_feat_resp resp;
2311         int ret;
2312
2313         if (!ena_com_check_supported_feature_id(
2314                     ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2315                 pr_debug("Feature %d isn't supported\n",
2316                          ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2317                 return -EOPNOTSUPP;
2318         }
2319
2320         ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2321         if (ret) {
2322                 pr_err("Failed to convert host indirection table to device table\n");
2323                 return ret;
2324         }
2325
2326         memset(&cmd, 0x0, sizeof(cmd));
2327
2328         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2329         cmd.aq_common_descriptor.flags =
2330                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2331         cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2332         cmd.u.ind_table.size = rss->tbl_log_size;
2333         cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2334
2335         ret = ena_com_mem_addr_set(ena_dev,
2336                                    &cmd.control_buffer.address,
2337                                    rss->rss_ind_tbl_dma_addr);
2338         if (unlikely(ret)) {
2339                 pr_err("memory address set failed\n");
2340                 return ret;
2341         }
2342
2343         cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2344                 sizeof(struct ena_admin_rss_ind_table_entry);
2345
2346         ret = ena_com_execute_admin_command(admin_queue,
2347                                             (struct ena_admin_aq_entry *)&cmd,
2348                                             sizeof(cmd),
2349                                             (struct ena_admin_acq_entry *)&resp,
2350                                             sizeof(resp));
2351
2352         if (unlikely(ret))
2353                 pr_err("Failed to set indirect table. error: %d\n", ret);
2354
2355         return ret;
2356 }
2357
2358 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2359 {
2360         struct ena_rss *rss = &ena_dev->rss;
2361         struct ena_admin_get_feat_resp get_resp;
2362         u32 tbl_size;
2363         int i, rc;
2364
2365         tbl_size = (1ULL << rss->tbl_log_size) *
2366                 sizeof(struct ena_admin_rss_ind_table_entry);
2367
2368         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2369                                     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2370                                     rss->rss_ind_tbl_dma_addr,
2371                                     tbl_size);
2372         if (unlikely(rc))
2373                 return rc;
2374
2375         if (!ind_tbl)
2376                 return 0;
2377
2378         rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2379         if (unlikely(rc))
2380                 return rc;
2381
2382         for (i = 0; i < (1 << rss->tbl_log_size); i++)
2383                 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2384
2385         return 0;
2386 }
2387
2388 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2389 {
2390         int rc;
2391
2392         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2393
2394         rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2395         if (unlikely(rc))
2396                 goto err_indr_tbl;
2397
2398         rc = ena_com_hash_key_allocate(ena_dev);
2399         if (unlikely(rc))
2400                 goto err_hash_key;
2401
2402         rc = ena_com_hash_ctrl_init(ena_dev);
2403         if (unlikely(rc))
2404                 goto err_hash_ctrl;
2405
2406         return 0;
2407
2408 err_hash_ctrl:
2409         ena_com_hash_key_destroy(ena_dev);
2410 err_hash_key:
2411         ena_com_indirect_table_destroy(ena_dev);
2412 err_indr_tbl:
2413
2414         return rc;
2415 }
2416
2417 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2418 {
2419         ena_com_indirect_table_destroy(ena_dev);
2420         ena_com_hash_key_destroy(ena_dev);
2421         ena_com_hash_ctrl_destroy(ena_dev);
2422
2423         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2424 }
2425
2426 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2427 {
2428         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2429
2430         host_attr->host_info =
2431                 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2432                                     &host_attr->host_info_dma_addr, GFP_KERNEL);
2433         if (unlikely(!host_attr->host_info))
2434                 return -ENOMEM;
2435
2436         host_attr->host_info->ena_spec_version =
2437                 ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2438                 (ENA_COMMON_SPEC_VERSION_MINOR));
2439
2440         return 0;
2441 }
2442
2443 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2444                                 u32 debug_area_size)
2445 {
2446         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2447
2448         host_attr->debug_area_virt_addr =
2449                 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2450                                     &host_attr->debug_area_dma_addr, GFP_KERNEL);
2451         if (unlikely(!host_attr->debug_area_virt_addr)) {
2452                 host_attr->debug_area_size = 0;
2453                 return -ENOMEM;
2454         }
2455
2456         host_attr->debug_area_size = debug_area_size;
2457
2458         return 0;
2459 }
2460
2461 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2462 {
2463         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2464
2465         if (host_attr->host_info) {
2466                 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2467                                   host_attr->host_info_dma_addr);
2468                 host_attr->host_info = NULL;
2469         }
2470 }
2471
2472 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2473 {
2474         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2475
2476         if (host_attr->debug_area_virt_addr) {
2477                 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2478                                   host_attr->debug_area_virt_addr,
2479                                   host_attr->debug_area_dma_addr);
2480                 host_attr->debug_area_virt_addr = NULL;
2481         }
2482 }
2483
2484 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2485 {
2486         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2487         struct ena_com_admin_queue *admin_queue;
2488         struct ena_admin_set_feat_cmd cmd;
2489         struct ena_admin_set_feat_resp resp;
2490
2491         int ret;
2492
2493         /* Host attribute config is called before ena_com_get_dev_attr_feat
2494          * so ena_com can't check if the feature is supported.
2495          */
2496
2497         memset(&cmd, 0x0, sizeof(cmd));
2498         admin_queue = &ena_dev->admin_queue;
2499
2500         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2501         cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2502
2503         ret = ena_com_mem_addr_set(ena_dev,
2504                                    &cmd.u.host_attr.debug_ba,
2505                                    host_attr->debug_area_dma_addr);
2506         if (unlikely(ret)) {
2507                 pr_err("memory address set failed\n");
2508                 return ret;
2509         }
2510
2511         ret = ena_com_mem_addr_set(ena_dev,
2512                                    &cmd.u.host_attr.os_info_ba,
2513                                    host_attr->host_info_dma_addr);
2514         if (unlikely(ret)) {
2515                 pr_err("memory address set failed\n");
2516                 return ret;
2517         }
2518
2519         cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2520
2521         ret = ena_com_execute_admin_command(admin_queue,
2522                                             (struct ena_admin_aq_entry *)&cmd,
2523                                             sizeof(cmd),
2524                                             (struct ena_admin_acq_entry *)&resp,
2525                                             sizeof(resp));
2526
2527         if (unlikely(ret))
2528                 pr_err("Failed to set host attributes: %d\n", ret);
2529
2530         return ret;
2531 }
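/*
 * Typical host-attribute flow, sketched with an arbitrary debug area size;
 * the host_info and debug area contents are filled by the caller before the
 * set command is issued:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
 *	... fill host_attr->host_info and the debug area ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 *
 *	ena_com_delete_debug_area(ena_dev);
 *	ena_com_delete_host_info(ena_dev);
 */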
2532
2533 /* Interrupt moderation */
2534 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2535 {
2536         return ena_com_check_supported_feature_id(ena_dev,
2537                                                   ENA_ADMIN_INTERRUPT_MODERATION);
2538 }
2539
2540 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2541                                                       u32 tx_coalesce_usecs)
2542 {
2543         if (!ena_dev->intr_delay_resolution) {
2544                 pr_err("Illegal interrupt delay granularity value\n");
2545                 return -EFAULT;
2546         }
2547
2548         ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2549                 ena_dev->intr_delay_resolution;
2550
2551         return 0;
2552 }
2553
2554 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2555                                                       u32 rx_coalesce_usecs)
2556 {
2557         if (!ena_dev->intr_delay_resolution) {
2558                 pr_err("Illegal interrupt delay granularity value\n");
2559                 return -EFAULT;
2560         }
2561
2562         /* We use LOWEST entry of moderation table for storing
2563          * nonadaptive interrupt coalescing values
2564          */
2565         ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2566                 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2567
2568         return 0;
2569 }
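/*
 * Example (values illustrative): with a device-reported intr_delay_resolution
 * of 2 usec per unit, a request of 64 usec is stored as 64 / 2 = 32 units.
 */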
2570
2571 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2572 {
2573         if (ena_dev->intr_moder_tbl)
2574                 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2575         ena_dev->intr_moder_tbl = NULL;
2576 }
2577
2578 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2579 {
2580         struct ena_admin_get_feat_resp get_resp;
2581         u16 delay_resolution;
2582         int rc;
2583
2584         rc = ena_com_get_feature(ena_dev, &get_resp,
2585                                  ENA_ADMIN_INTERRUPT_MODERATION);
2586
2587         if (rc) {
2588                 if (rc == -EOPNOTSUPP) {
2589                         pr_debug("Feature %d isn't supported\n",
2590                                  ENA_ADMIN_INTERRUPT_MODERATION);
2591                         rc = 0;
2592                 } else {
2593                         pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2594                                rc);
2595                 }
2596
2597                 /* no moderation supported, disable adaptive support */
2598                 ena_com_disable_adaptive_moderation(ena_dev);
2599                 return rc;
2600         }
2601
2602         rc = ena_com_init_interrupt_moderation_table(ena_dev);
2603         if (rc)
2604                 goto err;
2605
2606         /* if moderation is supported by device we set adaptive moderation */
2607         delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2608         ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2609         ena_com_enable_adaptive_moderation(ena_dev);
2610
2611         return 0;
2612 err:
2613         ena_com_destroy_interrupt_moderation(ena_dev);
2614         return rc;
2615 }
2616
2617 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2618 {
2619         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2620
2621         if (!intr_moder_tbl)
2622                 return;
2623
2624         intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2625                 ENA_INTR_LOWEST_USECS;
2626         intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2627                 ENA_INTR_LOWEST_PKTS;
2628         intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2629                 ENA_INTR_LOWEST_BYTES;
2630
2631         intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2632                 ENA_INTR_LOW_USECS;
2633         intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2634                 ENA_INTR_LOW_PKTS;
2635         intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2636                 ENA_INTR_LOW_BYTES;
2637
2638         intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2639                 ENA_INTR_MID_USECS;
2640         intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2641                 ENA_INTR_MID_PKTS;
2642         intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2643                 ENA_INTR_MID_BYTES;
2644
2645         intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2646                 ENA_INTR_HIGH_USECS;
2647         intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2648                 ENA_INTR_HIGH_PKTS;
2649         intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2650                 ENA_INTR_HIGH_BYTES;
2651
2652         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2653                 ENA_INTR_HIGHEST_USECS;
2654         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2655                 ENA_INTR_HIGHEST_PKTS;
2656         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2657                 ENA_INTR_HIGHEST_BYTES;
2658 }
2659
2660 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2661 {
2662         return ena_dev->intr_moder_tx_interval;
2663 }
2664
2665 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2666 {
2667         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2668
2669         if (intr_moder_tbl)
2670                 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2671
2672         return 0;
2673 }
2674
2675 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2676                                         enum ena_intr_moder_level level,
2677                                         struct ena_intr_moder_entry *entry)
2678 {
2679         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2680
2681         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2682                 return;
2683
2684         intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2685         if (ena_dev->intr_delay_resolution)
2686                 intr_moder_tbl[level].intr_moder_interval /=
2687                         ena_dev->intr_delay_resolution;
2688         intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2689
2690         /* use hardcoded value until ethtool supports bytecount parameter */
2691         if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2692                 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2693 }
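/*
 * Hypothetical tuning example; the numbers are arbitrary placeholders:
 *
 *	struct ena_intr_moder_entry entry = {
 *		.intr_moder_interval	= 64,
 *		.pkts_per_interval	= 128,
 *		.bytes_per_interval	= SZ_16K,
 *	};
 *
 *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 */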
2694
2695 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2696                                        enum ena_intr_moder_level level,
2697                                        struct ena_intr_moder_entry *entry)
2698 {
2699         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2700
2701         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2702                 return;
2703
2704         entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2705         if (ena_dev->intr_delay_resolution)
2706                 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2707         entry->pkts_per_interval =
2708                 intr_moder_tbl[level].pkts_per_interval;
2709         entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2710 }