/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ		0
#define NICVF_INTR_ID_SQ		8
#define NICVF_INTR_ID_RBDR		16
#define NICVF_INTR_ID_MISC		18
#define NICVF_INTR_ID_QS_ERR		19

#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

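/* Usage sketch (illustrative, not from the original driver): walk all of
 * this VF's CQ interrupt IDs (0..7):
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		pr_debug("CQ interrupt id %d\n", irq);
 */
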
#define RBDR_SIZE0		0ULL /* 8K entries */
#define RBDR_SIZE1		1ULL /* 16K entries */
#define RBDR_SIZE2		2ULL /* 32K entries */
#define RBDR_SIZE3		3ULL /* 64K entries */
#define RBDR_SIZE4		4ULL /* 128K entries */
#define RBDR_SIZE5		5ULL /* 256K entries */
#define RBDR_SIZE6		6ULL /* 512K entries */

#define SND_QUEUE_SIZE0		0ULL /* 1K entries */
#define SND_QUEUE_SIZE1		1ULL /* 2K entries */
#define SND_QUEUE_SIZE2		2ULL /* 4K entries */
#define SND_QUEUE_SIZE3		3ULL /* 8K entries */
#define SND_QUEUE_SIZE4		4ULL /* 16K entries */
#define SND_QUEUE_SIZE5		5ULL /* 32K entries */
#define SND_QUEUE_SIZE6		6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0		0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1		1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2		2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3		3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4		4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5		5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6		6ULL /* 64K entries */

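/* Note (derived from the *_QUEUE_LEN/RCV_BUF_COUNT macros below): a size
 * field value of n encodes (1K << n) entries for SQ/CQ rings and
 * (8K << n) entries for RBDR rings, e.g. SND_QUEUE_SIZE2 -> 4K entries.
 */
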
/* Default queue count per QS, their lengths and threshold values */
#define DEFAULT_RBDR_CNT	1

#define SND_QSIZE		SND_QUEUE_SIZE0
#define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH	2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* Since timestamping is not enabled; otherwise 2 */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */

/* Number of CQEs that might anyway get used by HW due to pipelining
 * effects, irrespective of PASS/DROP/LEVELS being configured.
 */
#define CMP_QUEUE_PIPELINE_RSVD	544

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

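/* Layout implied by RCV_FRAG_LEN (for illustration): NET_SKB_PAD bytes of
 * headroom plus the 1536-byte DMA buffer, cacheline aligned, followed by a
 * cacheline-aligned struct skb_shared_info, so a fragment can be turned
 * into an skb without copying (build_skb()-style construction).
 */
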
#define RCV_BUF_HEADROOM	128 /* To store dma address for XDP redirect */
#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)

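/* Arithmetic check (assumes the standard 256-byte XDP_PACKET_HEADROOM from
 * the BPF headers): XDP_HEADROOM = 256 + 128 = 384 bytes reserved ahead of
 * the packet when an XDP program is attached.
 */
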
#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)

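/* Worked example with the defaults above (illustrative): SND_QUEUE_LEN is
 * 1K and MIN_SQ_DESC_PER_PKT_XMIT is 2, so an SQ holds at most
 * 1024 / 2 = 512 packets; with MAX_CQE_PER_PKT_XMIT of 1 that bounds TX
 * CQE usage at MAX_CQES_FOR_TX = 512.
 */
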
/* RED and Backpressure levels of CQ for pkt reception
 * For CQ, level is a measure of emptiness, i.e. 0x0 means full
 * e.g.: for a CQ of size 4K and pass/drop levels of 160/144,
 * HW accepts pkt if unused CQE >= 2560
 * RED accepts pkt if unused CQE < 2560 & >= 2304
 * DROPs pkts if unused CQE < 2304
 */
#define RQ_PASS_CQ_LVL		192ULL
#define RQ_DROP_CQ_LVL		184ULL

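/* Inference from the example above (an assumption, not stated by the
 * hardware docs here): one level unit appears to equal CQ size / 256
 * CQEs, since 160 * (4096 / 256) = 2560. Under that assumption, the
 * default 1K CQ with PASS/DROP of 192/184 passes while >= 768 CQEs are
 * free and drops below 736.
 */
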
/* RED and Backpressure levels of RBDR for pkt reception
 * For RBDR, level is a measure of fullness, i.e. 0x0 means empty
 * e.g.: for an RBDR of size 8K and pass/drop levels of 4/0,
 * HW accepts pkt if unused RBs >= 256
 * RED accepts pkt if unused RBs < 256 & >= 0
 * DROPs pkts if unused RBs < 0
 */
#define RQ_PASS_RBDR_LVL	8ULL
#define RQ_DROP_RBDR_LVL	0ULL

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)

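/* Worked example (illustrative): NICVF_ALIGNED_ADDR(0x1234,
 * NICVF_CQ_BASE_ALIGN_BYTES) rounds 0x1234 up to the next 512-byte
 * boundary, i.e. 0x1400.
 */
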
/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
152 CQ_RX_ERROP_RE_PARTIAL = 0x1,
153 CQ_RX_ERROP_RE_JABBER = 0x2,
154 CQ_RX_ERROP_RE_FCS = 0x7,
155 CQ_RX_ERROP_RE_TERMINATE = 0x9,
156 CQ_RX_ERROP_RE_RX_CTL = 0xb,
157 CQ_RX_ERROP_PREL2_ERR = 0x1f,
158 CQ_RX_ERROP_L2_FRAGMENT = 0x20,
159 CQ_RX_ERROP_L2_OVERRUN = 0x21,
160 CQ_RX_ERROP_L2_PFCS = 0x22,
161 CQ_RX_ERROP_L2_PUNY = 0x23,
162 CQ_RX_ERROP_L2_MAL = 0x24,
163 CQ_RX_ERROP_L2_OVERSIZE = 0x25,
164 CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
165 CQ_RX_ERROP_L2_LENMISM = 0x27,
166 CQ_RX_ERROP_L2_PCLP = 0x28,
167 CQ_RX_ERROP_IP_NOT = 0x41,
168 CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
169 CQ_RX_ERROP_IP_MAL = 0x43,
170 CQ_RX_ERROP_IP_MALD = 0x44,
171 CQ_RX_ERROP_IP_HOP = 0x45,
172 CQ_RX_ERROP_L3_ICRC = 0x46,
173 CQ_RX_ERROP_L3_PCLP = 0x47,
174 CQ_RX_ERROP_L4_MAL = 0x61,
175 CQ_RX_ERROP_L4_CHK = 0x62,
176 CQ_RX_ERROP_UDP_LEN = 0x63,
177 CQ_RX_ERROP_L4_PORT = 0x64,
178 CQ_RX_ERROP_TCP_FLAG = 0x65,
179 CQ_RX_ERROP_TCP_OFFSET = 0x66,
180 CQ_RX_ERROP_L4_PCLP = 0x67,
181 CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u32		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;		/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem	dmem;
	bool		is_xdp;

	/* For page recycling */
	int		pgidx;
	int		pgcnt;
	int		pgalloc;
	struct pgcache	*pgcache;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct	rbdr	*rbdr_start;
	struct	rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;  /* CQ's QS to which this RQ is assigned */
	u8		cq_idx; /* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	u8		start_rbdr_qs;     /* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	u8		caching;
	struct		rx_tx_queue_stats stats;
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;  /* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem	dmem;
	int		irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;  /* CQ's QS to which this SQ is pointing */
	u8		cq_idx; /* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;
	u64		*xdp_page;
	u16		xdp_desc_cnt;
	u16		xdp_free_cnt;
	bool		is_xdp;

	/* For TSO segment's header */
	char		*tso_hdrs;
	dma_addr_t	tso_hdrs_phys;

	cpumask_t	affinity_mask;
	struct q_desc_mem	dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)\
		(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
		(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
		(&(((union cq_desc_t *)((RING)->desc))[idx]))

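/* Usage sketch (illustrative; 'sq'/'cq' are rings whose ->desc memory has
 * been allocated, and 'qentry'/'cqe_head' are indexes within those rings):
 *
 *	struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, qentry);
 *	union cq_desc_t *cq_desc = GET_CQ_DESC(cq, cqe_head);
 */
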
/* CQ status bits */
#define	CQ_WR_FULL	BIT(26)
#define	CQ_WR_DISABLE	BIT(25)
#define	CQ_WR_FAULT	BIT(24)
#define	CQ_CQE_COUNT	(0xFFFF << 0)

#define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

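/* Usage sketch (illustrative; assumes the NIC_QSET_CQ_0_7_STATUS register
 * offset from nic_reg.h): read a CQ's status and test for write errors:
 *
 *	u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
 *
 *	if (status & CQ_ERR_MASK)
 *		handle the full/disabled/faulted CQ write error
 */
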
static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num);
int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
			    u64 bufaddr, u64 dma_addr, u16 len);
void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp);

void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */