net: ena: minor performance improvement
author     Arthur Kiyanovski <akiyano@amazon.com>
           Thu, 11 Oct 2018 08:26:16 +0000 (11:26 +0300)
committer  David S. Miller <davem@davemloft.net>
           Thu, 11 Oct 2018 17:13:50 +0000 (10:13 -0700)
Reduce fastpath overhead by making ena_com_tx_comp_req_id_get() inline.
Also move it to the ena_eth_com.h file, along with its dependency
ena_com_cq_inc_head().

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h

diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f9eb9ecbe6e428735ae21c4ed8b71..2fa032bab0c321dd528ac990cca8ee41d3241365 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,15 +59,6 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        return cdesc;
 }
 
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-       io_cq->head++;
-
-       /* Switch phase bit in case of wrap around */
-       if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-               io_cq->phase ^= 1;
-}
-
 static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
        u16 tail_masked;
@@ -477,40 +468,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
        return 0;
 }
 
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-       u8 expected_phase, cdesc_phase;
-       struct ena_eth_io_tx_cdesc *cdesc;
-       u16 masked_head;
-
-       masked_head = io_cq->head & (io_cq->q_depth - 1);
-       expected_phase = io_cq->phase;
-
-       cdesc = (struct ena_eth_io_tx_cdesc *)
-               ((uintptr_t)io_cq->cdesc_addr.virt_addr +
-               (masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-       /* When the current completion descriptor phase isn't the same as the
-        * expected, it mean that the device still didn't update
-        * this completion.
-        */
-       cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-       if (cdesc_phase != expected_phase)
-               return -EAGAIN;
-
-       dma_rmb();
-       if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-               pr_err("Invalid req id %d\n", cdesc->req_id);
-               return -EINVAL;
-       }
-
-       ena_com_cq_inc_head(io_cq);
-
-       *req_id = READ_ONCE(cdesc->req_id);
-
-       return 0;
-}
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
 {
        struct ena_eth_io_rx_cdesc_base *cdesc;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe9c60d77482c98df22fdb2f89e3c9..4930324e9d8d8ffdc241383aa88068b7daa201f0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -86,8 +86,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                               struct ena_com_buf *ena_buf,
                               u16 req_id);
 
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
 
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -159,4 +157,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
        io_sq->next_to_comp += elem;
 }
 
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+       io_cq->head++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+               io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+                                            u16 *req_id)
+{
+       u8 expected_phase, cdesc_phase;
+       struct ena_eth_io_tx_cdesc *cdesc;
+       u16 masked_head;
+
+       masked_head = io_cq->head & (io_cq->q_depth - 1);
+       expected_phase = io_cq->phase;
+
+       cdesc = (struct ena_eth_io_tx_cdesc *)
+               ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+               (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+       /* When the current completion descriptor phase isn't the same as the
+        * expected phase, it means that the device hasn't updated
+        * this completion yet.
+        */
+       cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+       if (cdesc_phase != expected_phase)
+               return -EAGAIN;
+
+       dma_rmb();
+
+       *req_id = READ_ONCE(cdesc->req_id);
+       if (unlikely(*req_id >= io_cq->q_depth)) {
+               pr_err("Invalid req id %d\n", *req_id);
+               return -EINVAL;
+       }
+
+       ena_com_cq_inc_head(io_cq);
+
+       return 0;
+}
+
 #endif /* ENA_ETH_COM_H_ */
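
For context on the fast path this change targets: ena_com_tx_comp_req_id_get()
follows the usual phase-bit completion-queue scheme, where the consumer tracks
an expected phase and the device flips the phase bit it writes on each pass
over the ring, so a stale phase bit means "not completed yet". The standalone
user-space sketch below models that scheme under simplified assumptions; the
names used here (struct cq, cq_poll_req_id(), device_post(), Q_DEPTH) are
illustrative only and are not part of the driver, and the sketch omits the
READ_ONCE()/dma_rmb() ordering the kernel code needs because it is
single-threaded.

/*
 * Standalone user-space sketch of the phase-bit completion-queue scheme.
 * All names here (struct cq, cq_poll_req_id, device_post, Q_DEPTH) are
 * illustrative and are not part of the ENA driver.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q_DEPTH    8U			/* ring size, must be a power of two */
#define PHASE_MASK 0x1U

struct tx_cdesc {			/* simplified TX completion descriptor */
	uint16_t req_id;		/* which request this completion is for */
	uint8_t flags;			/* bit 0 carries the phase written by the device */
};

struct cq {
	struct tx_cdesc ring[Q_DEPTH];
	uint16_t head;			/* consumer position, never masked */
	uint8_t phase;			/* phase value the consumer expects next */
};

static void cq_inc_head(struct cq *cq)
{
	cq->head++;
	/* Switch the expected phase each time the power-of-two ring wraps. */
	if ((cq->head & (Q_DEPTH - 1)) == 0)
		cq->phase ^= 1;
}

/* Return 0 and fill *req_id if a completion is ready, -1 otherwise. */
static int cq_poll_req_id(struct cq *cq, uint16_t *req_id)
{
	struct tx_cdesc *cdesc = &cq->ring[cq->head & (Q_DEPTH - 1)];

	/* A stale phase bit means the device has not written this entry yet. */
	if ((cdesc->flags & PHASE_MASK) != cq->phase)
		return -1;

	*req_id = cdesc->req_id;
	if (*req_id >= Q_DEPTH)
		return -1;		/* corrupted descriptor */

	cq_inc_head(cq);
	return 0;
}

/* Stand-in for the device posting a completion for request 'id'. */
static void device_post(struct cq *cq, uint16_t idx, uint16_t id, uint8_t phase)
{
	cq->ring[idx & (Q_DEPTH - 1)].req_id = id;
	cq->ring[idx & (Q_DEPTH - 1)].flags = phase & PHASE_MASK;
}

int main(void)
{
	struct cq cq;
	uint16_t req_id;

	memset(&cq, 0, sizeof(cq));
	cq.phase = 1;			/* ring starts zeroed, so pass one uses phase 1 */

	device_post(&cq, 0, 3, 1);	/* "device" completes request 3 at slot 0 */
	if (cq_poll_req_id(&cq, &req_id) == 0)
		printf("completed req_id %u\n", (unsigned int)req_id);

	/* Slot 1 still holds phase 0, so the consumer sees "not ready". */
	if (cq_poll_req_id(&cq, &req_id) != 0)
		printf("no completion ready\n");

	return 0;
}

The poll above is what the driver runs once per completed TX descriptor on the
fast path, which is why the commit moves it into the header as an inline
function: the work per call is a handful of loads and compares, small enough
that the function-call overhead is a measurable fraction of it.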