/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * To align with read requests, we round the value down to the nearest 4K,
 * which is our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
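
/* For example, with the values above ICE_MAX_DATA_PER_TXD_ALIGNED works out
 * to ~0xFFF & 0x3FFF = 0x3000, i.e. 12K of data per descriptor once the
 * (16K - 1) hardware limit is rounded down to a 4K read-request boundary.
 */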

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* We assume that the cache line size is always 64 bytes for ice. To make
 * sure that is a correct assumption, probe reads GLPCI_CNF2 and prints a
 * warning if it reports a 128 byte cache line. We do it this way because we
 * do not want to read the GLPCI_CNF2 register, or a variable caching its
 * value, on every pass through the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
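/* Worst case, one skb needs a descriptor per page fragment, one for the
 * linear skb->data area and one for the context descriptor, plus one cache
 * line worth of descriptors as headroom. On a typical 4K page kernel
 * (MAX_SKB_FRAGS == 17) with 16-byte Tx descriptors this comes to
 * 17 + 1 + 4 + 1 = 23 descriptors.
 */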

#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
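
/* ICE_DESC_UNUSED() returns the number of free descriptors in the ring,
 * i.e. how far next_to_use may advance before it catches up with
 * next_to_clean. For example, with count == 512, next_to_use == 10 and
 * next_to_clean == 5 it yields 512 + 5 - 10 - 1 = 506.
 */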

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_S	16
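
/* The mask and shift above place the VLAN tag in the upper 16 bits of
 * tx_flags; a (hypothetical) caller would extract it as:
 *
 *	vlan_tag = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 */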

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct ice_tx_offload_params {
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register, but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define ICE_ITR_8K	125
#define ICE_ITR_20K	50
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
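
/* The ITR values are expressed in microseconds between interrupts, so
 * ICE_ITR_20K (50 us) allows roughly 20,000 interrupts per second and
 * ICE_ITR_8K (125 us) roughly 8,000.
 */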

/* Apply ITR granularity translation to program the register. itr_gran is
 * either 2 or 4 usecs, so we divide it by 2 first and then shift by that
 * value.
 */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
				   ((itr_gran) / 2))
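
/* For instance, with a 2 usec granularity ITR_TO_REG(ICE_ITR_20K, 2) clears
 * the ICE_ITR_DYNAMIC flag and shifts 50 right by 1, programming 25 into
 * the register; with a 4 usec granularity the same value becomes
 * 50 >> 2 = 12.
 */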

#define ICE_DFLT_INTRL	0

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index;			/* Queue number of ring */
	u32 txq_teid;			/* Added Tx queue TEID */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 ring_active;			/* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
	struct rcu_head rcu;		/* to avoid race on free */
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* head of linked list of rings */
	struct ice_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	enum ice_latency_range latency_range;
	int itr_idx;			/* index in the interrupt vector */
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
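
/* A minimal usage sketch, assuming a q_vector with a Tx ring container:
 *
 *	struct ice_ring *ring;
 *	u64 pkts = 0;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		pkts += ring->stats.pkts;
 */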

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */