// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/dma-mapping.h>
11 static const struct hal_srng_config hw_srng_config[] = {
12 /* TODO: max_rings can populated by querying HW capabilities */
14 .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
16 .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
18 .ring_dir = HAL_SRNG_DIR_DST,
20 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
21 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
24 HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
25 HAL_REO2_RING_HP - HAL_REO1_RING_HP,
27 .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
30 /* Designating REO2TCL ring as exception ring. This ring is
31 * similar to other REO2SW rings though it is named as REO2TCL.
32 * Any of theREO2SW rings can be used as exception ring.
34 .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
36 .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
38 .ring_dir = HAL_SRNG_DIR_DST,
40 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
41 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
43 .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
46 .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
48 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
50 .ring_dir = HAL_SRNG_DIR_SRC,
52 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
53 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
55 .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
58 .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
60 .entry_size = (sizeof(struct hal_tlv_hdr) +
61 sizeof(struct hal_reo_get_queue_stats)) >> 2,
63 .ring_dir = HAL_SRNG_DIR_SRC,
65 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
66 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
68 .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
71 .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
73 .entry_size = (sizeof(struct hal_tlv_hdr) +
74 sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
76 .ring_dir = HAL_SRNG_DIR_DST,
78 HAL_SEQ_WCSS_UMAC_REO_REG +
79 HAL_REO_STATUS_RING_BASE_LSB,
80 HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
82 .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
85 .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
87 .entry_size = (sizeof(struct hal_tlv_hdr) +
88 sizeof(struct hal_tcl_data_cmd)) >> 2,
90 .ring_dir = HAL_SRNG_DIR_SRC,
92 HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
93 HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
96 HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
97 HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
99 .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
102 .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
104 .entry_size = (sizeof(struct hal_tlv_hdr) +
105 sizeof(struct hal_tcl_gse_cmd)) >> 2,
107 .ring_dir = HAL_SRNG_DIR_SRC,
109 HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
110 HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
112 .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
115 .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
117 .entry_size = (sizeof(struct hal_tlv_hdr) +
118 sizeof(struct hal_tcl_status_ring)) >> 2,
120 .ring_dir = HAL_SRNG_DIR_DST,
122 HAL_SEQ_WCSS_UMAC_TCL_REG +
123 HAL_TCL_STATUS_RING_BASE_LSB,
124 HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
126 .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
129 .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
131 .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
133 .ring_dir = HAL_SRNG_DIR_SRC,
135 (HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
136 HAL_CE_DST_RING_BASE_LSB),
137 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
140 (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
141 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
142 (HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
143 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
145 .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
148 .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
150 .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
152 .ring_dir = HAL_SRNG_DIR_SRC,
154 (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
155 HAL_CE_DST_RING_BASE_LSB),
156 HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
159 (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
160 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
161 (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
162 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
164 .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
166 { /* CE_DST_STATUS */
167 .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
169 .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
171 .ring_dir = HAL_SRNG_DIR_DST,
173 (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
174 HAL_CE_DST_STATUS_RING_BASE_LSB),
175 (HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
176 HAL_CE_DST_STATUS_RING_HP),
179 (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
180 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
181 (HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
182 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
184 .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
186 { /* WBM_IDLE_LINK */
187 .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
189 .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
191 .ring_dir = HAL_SRNG_DIR_SRC,
193 (HAL_SEQ_WCSS_UMAC_WBM_REG +
194 HAL_WBM_IDLE_LINK_RING_BASE_LSB),
195 (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
197 .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
199 { /* SW2WBM_RELEASE */
200 .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
202 .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
204 .ring_dir = HAL_SRNG_DIR_SRC,
206 (HAL_SEQ_WCSS_UMAC_WBM_REG +
207 HAL_WBM_RELEASE_RING_BASE_LSB),
208 (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
210 .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
212 { /* WBM2SW_RELEASE */
213 .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
215 .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
217 .ring_dir = HAL_SRNG_DIR_DST,
219 (HAL_SEQ_WCSS_UMAC_WBM_REG +
220 HAL_WBM0_RELEASE_RING_BASE_LSB),
221 (HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
224 (HAL_WBM1_RELEASE_RING_BASE_LSB -
225 HAL_WBM0_RELEASE_RING_BASE_LSB),
226 (HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
228 .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
231 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
233 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
235 .ring_dir = HAL_SRNG_DIR_SRC,
236 .max_size = HAL_RXDMA_RING_MAX_SIZE,
239 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
241 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
243 .ring_dir = HAL_SRNG_DIR_DST,
244 .max_size = HAL_RXDMA_RING_MAX_SIZE,
246 { /* RXDMA_MONITOR_BUF */
247 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
249 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
251 .ring_dir = HAL_SRNG_DIR_SRC,
252 .max_size = HAL_RXDMA_RING_MAX_SIZE,
254 { /* RXDMA_MONITOR_STATUS */
255 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
257 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
259 .ring_dir = HAL_SRNG_DIR_SRC,
260 .max_size = HAL_RXDMA_RING_MAX_SIZE,
262 { /* RXDMA_MONITOR_DST */
263 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
265 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
267 .ring_dir = HAL_SRNG_DIR_DST,
268 .max_size = HAL_RXDMA_RING_MAX_SIZE,
270 { /* RXDMA_MONITOR_DESC */
271 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
273 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
275 .ring_dir = HAL_SRNG_DIR_SRC,
276 .max_size = HAL_RXDMA_RING_MAX_SIZE,
278 { /* RXDMA DIR BUF */
279 .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
281 .entry_size = 8 >> 2, /* TODO: Define the struct */
283 .ring_dir = HAL_SRNG_DIR_SRC,
284 .max_size = HAL_RXDMA_RING_MAX_SIZE,
288 static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
290 struct ath11k_hal *hal = &ab->hal;
293 size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
294 hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
302 static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
304 struct ath11k_hal *hal = &ab->hal;
310 size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
311 dma_free_coherent(ab->dev, size,
312 hal->rdp.vaddr, hal->rdp.paddr);
313 hal->rdp.vaddr = NULL;
316 static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
318 struct ath11k_hal *hal = &ab->hal;
321 size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
322 hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
330 static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
332 struct ath11k_hal *hal = &ab->hal;
338 size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
339 dma_free_coherent(ab->dev, size,
340 hal->wrp.vaddr, hal->wrp.paddr);
341 hal->wrp.vaddr = NULL;
344 static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
345 struct hal_srng *srng, int ring_num)
347 const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
351 addr = HAL_CE_DST_RING_CTRL +
352 srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
353 ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
355 val = ath11k_hif_read32(ab, addr);
356 val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
357 val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
358 srng->u.dst_ring.max_buffer_length);
359 ath11k_hif_write32(ab, addr, val);
362 static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
363 struct hal_srng *srng)
365 struct ath11k_hal *hal = &ab->hal;
370 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
372 if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
373 ath11k_hif_write32(ab, reg_base +
374 HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
375 (u32)srng->msi_addr);
377 val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
378 ((u64)srng->msi_addr >>
379 HAL_ADDR_MSB_REG_SHIFT)) |
380 HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
381 ath11k_hif_write32(ab, reg_base +
382 HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
384 ath11k_hif_write32(ab,
385 reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
389 ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
391 val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
392 ((u64)srng->ring_base_paddr >>
393 HAL_ADDR_MSB_REG_SHIFT)) |
394 FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
395 (srng->entry_size * srng->num_entries));
396 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
398 val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
399 FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
400 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
402 /* interrupt setup */
403 val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
404 (srng->intr_timer_thres_us >> 3));
406 val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
407 (srng->intr_batch_cntr_thres_entries *
410 ath11k_hif_write32(ab,
411 reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
414 hp_addr = hal->rdp.paddr +
415 ((unsigned long)srng->u.dst_ring.hp_addr -
416 (unsigned long)hal->rdp.vaddr);
417 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
418 hp_addr & HAL_ADDR_LSB_REG_MASK);
419 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
420 hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
422 /* Initialize head and tail pointers to indicate ring is empty */
423 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
424 ath11k_hif_write32(ab, reg_base, 0);
425 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
426 *srng->u.dst_ring.hp_addr = 0;
428 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
430 if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
431 val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
432 if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
433 val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
434 if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
435 val |= HAL_REO1_RING_MISC_MSI_SWAP;
436 val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
438 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
441 static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
442 struct hal_srng *srng)
444 struct ath11k_hal *hal = &ab->hal;
449 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
451 if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
452 ath11k_hif_write32(ab, reg_base +
453 HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
454 (u32)srng->msi_addr);
456 val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
457 ((u64)srng->msi_addr >>
458 HAL_ADDR_MSB_REG_SHIFT)) |
459 HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
460 ath11k_hif_write32(ab, reg_base +
461 HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
464 ath11k_hif_write32(ab, reg_base +
465 HAL_TCL1_RING_MSI1_DATA_OFFSET,
469 ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
471 val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
472 ((u64)srng->ring_base_paddr >>
473 HAL_ADDR_MSB_REG_SHIFT)) |
474 FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
475 (srng->entry_size * srng->num_entries));
476 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
478 val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
479 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
481 /* interrupt setup */
482 /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
483 * unit of 8 usecs instead of 1 usec (as required by v1).
485 val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
486 srng->intr_timer_thres_us);
488 val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
489 (srng->intr_batch_cntr_thres_entries *
492 ath11k_hif_write32(ab,
493 reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
497 if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
498 val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
499 srng->u.src_ring.low_threshold);
501 ath11k_hif_write32(ab,
502 reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
505 if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
506 tp_addr = hal->rdp.paddr +
507 ((unsigned long)srng->u.src_ring.tp_addr -
508 (unsigned long)hal->rdp.vaddr);
509 ath11k_hif_write32(ab,
510 reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
511 tp_addr & HAL_ADDR_LSB_REG_MASK);
512 ath11k_hif_write32(ab,
513 reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
514 tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
517 /* Initialize head and tail pointers to indicate ring is empty */
518 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
519 ath11k_hif_write32(ab, reg_base, 0);
520 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
521 *srng->u.src_ring.tp_addr = 0;
523 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
525 if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
526 val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
527 if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
528 val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
529 if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
530 val |= HAL_TCL1_RING_MISC_MSI_SWAP;
532 /* Loop count is not used for SRC rings */
533 val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
535 val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
537 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
540 static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
541 struct hal_srng *srng)
543 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
544 ath11k_hal_srng_src_hw_init(ab, srng);
546 ath11k_hal_srng_dst_hw_init(ab, srng);
549 static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
550 enum hal_ring_type type,
551 int ring_num, int mac_id)
553 const struct hal_srng_config *srng_config = &hw_srng_config[type];
556 if (ring_num >= srng_config->max_rings) {
557 ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
561 ring_id = srng_config->start_ring_id + ring_num;
562 if (srng_config->lmac_ring)
563 ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
565 if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
571 int ath11k_hal_srng_get_entrysize(u32 ring_type)
573 const struct hal_srng_config *srng_config;
575 if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
578 srng_config = &hw_srng_config[ring_type];
580 return (srng_config->entry_size << 2);
583 int ath11k_hal_srng_get_max_entries(u32 ring_type)
585 const struct hal_srng_config *srng_config;
587 if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
590 srng_config = &hw_srng_config[ring_type];
592 return (srng_config->max_size / srng_config->entry_size);
595 void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
596 struct hal_srng_params *params)
598 params->ring_base_paddr = srng->ring_base_paddr;
599 params->ring_base_vaddr = srng->ring_base_vaddr;
600 params->num_entries = srng->num_entries;
601 params->intr_timer_thres_us = srng->intr_timer_thres_us;
602 params->intr_batch_cntr_thres_entries =
603 srng->intr_batch_cntr_thres_entries;
604 params->low_threshold = srng->u.src_ring.low_threshold;
605 params->flags = srng->flags;
608 dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
609 struct hal_srng *srng)
611 if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
614 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
615 return ab->hal.wrp.paddr +
616 ((unsigned long)srng->u.src_ring.hp_addr -
617 (unsigned long)ab->hal.wrp.vaddr);
619 return ab->hal.rdp.paddr +
620 ((unsigned long)srng->u.dst_ring.hp_addr -
621 (unsigned long)ab->hal.rdp.vaddr);
624 dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
625 struct hal_srng *srng)
627 if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
630 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
631 return ab->hal.rdp.paddr +
632 ((unsigned long)srng->u.src_ring.tp_addr -
633 (unsigned long)ab->hal.rdp.vaddr);
635 return ab->hal.wrp.paddr +
636 ((unsigned long)srng->u.dst_ring.tp_addr -
637 (unsigned long)ab->hal.wrp.vaddr);
640 u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
643 case HAL_CE_DESC_SRC:
644 return sizeof(struct hal_ce_srng_src_desc);
645 case HAL_CE_DESC_DST:
646 return sizeof(struct hal_ce_srng_dest_desc);
647 case HAL_CE_DESC_DST_STATUS:
648 return sizeof(struct hal_ce_srng_dst_status_desc);
654 void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
657 struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
659 desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
660 desc->buffer_addr_info =
661 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
662 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
663 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
665 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
666 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
667 desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
670 void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
672 struct hal_ce_srng_dest_desc *desc =
673 (struct hal_ce_srng_dest_desc *)buf;
675 desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
676 desc->buffer_addr_info =
677 FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
678 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
681 u32 ath11k_hal_ce_dst_status_get_length(void *buf)
683 struct hal_ce_srng_dst_status_desc *desc =
684 (struct hal_ce_srng_dst_status_desc *)buf;
687 len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
688 desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
693 void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
696 desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
697 (paddr & HAL_ADDR_LSB_REG_MASK));
698 desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
699 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
700 FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
701 FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
704 u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
706 lockdep_assert_held(&srng->lock);
708 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
709 return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
714 u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
715 struct hal_srng *srng)
719 lockdep_assert_held(&srng->lock);
721 if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
724 desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
726 srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
732 int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
737 lockdep_assert_held(&srng->lock);
739 tp = srng->u.dst_ring.tp;
742 hp = *srng->u.dst_ring.hp_addr;
743 srng->u.dst_ring.cached_hp = hp;
745 hp = srng->u.dst_ring.cached_hp;
749 return (hp - tp) / srng->entry_size;
751 return (srng->ring_size - tp + hp) / srng->entry_size;
754 /* Returns number of available entries in src ring */
755 int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
760 lockdep_assert_held(&srng->lock);
762 hp = srng->u.src_ring.hp;
765 tp = *srng->u.src_ring.tp_addr;
766 srng->u.src_ring.cached_tp = tp;
768 tp = srng->u.src_ring.cached_tp;
772 return ((tp - hp) / srng->entry_size) - 1;
774 return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
777 u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
778 struct hal_srng *srng)
783 lockdep_assert_held(&srng->lock);
785 /* TODO: Using % is expensive, but we have to do this since size of some
786 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
787 * if separate function is defined for rings having power of 2 ring size
788 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
789 * overhead of % by using mask (with &).
791 next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
793 if (next_hp == srng->u.src_ring.cached_tp)
796 desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
797 srng->u.src_ring.hp = next_hp;
799 /* TODO: Reap functionality is not used by all rings. If particular
800 * ring does not use reap functionality, we need not update reap_hp
801 * with next_hp pointer. Need to make sure a separate function is used
802 * before doing any optimization by removing below code updating
805 srng->u.src_ring.reap_hp = next_hp;
810 u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
811 struct hal_srng *srng)
816 lockdep_assert_held(&srng->lock);
818 next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
821 if (next_reap_hp == srng->u.src_ring.cached_tp)
824 desc = srng->ring_base_vaddr + next_reap_hp;
825 srng->u.src_ring.reap_hp = next_reap_hp;
830 u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
831 struct hal_srng *srng)
835 lockdep_assert_held(&srng->lock);
837 if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
840 desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
841 srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
847 u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
849 lockdep_assert_held(&srng->lock);
851 if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
852 srng->u.src_ring.cached_tp)
855 return srng->ring_base_vaddr + srng->u.src_ring.hp;
858 void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
860 lockdep_assert_held(&srng->lock);
862 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
863 srng->u.src_ring.cached_tp =
864 *(volatile u32 *)srng->u.src_ring.tp_addr;
866 srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
869 /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
870 * should have been called before this.
872 void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
874 lockdep_assert_held(&srng->lock);
876 /* TODO: See if we need a write memory barrier here */
877 if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
878 /* For LMAC rings, ring pointer updates are done through FW and
879 * hence written to a shared memory location that is read by FW
881 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
882 srng->u.src_ring.last_tp =
883 *(volatile u32 *)srng->u.src_ring.tp_addr;
884 *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
886 srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
887 *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
890 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
891 srng->u.src_ring.last_tp =
892 *(volatile u32 *)srng->u.src_ring.tp_addr;
893 ath11k_hif_write32(ab,
894 (unsigned long)srng->u.src_ring.hp_addr -
895 (unsigned long)ab->mem,
896 srng->u.src_ring.hp);
898 srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
899 ath11k_hif_write32(ab,
900 (unsigned long)srng->u.dst_ring.tp_addr -
901 (unsigned long)ab->mem,
902 srng->u.dst_ring.tp);
906 srng->timestamp = jiffies;
909 void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
910 struct hal_wbm_idle_scatter_list *sbuf,
911 u32 nsbufs, u32 tot_link_desc,
914 struct ath11k_buffer_addr *link_addr;
916 u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
918 link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
920 for (i = 1; i < nsbufs; i++) {
921 link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
922 link_addr->info1 = FIELD_PREP(
923 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
924 (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
926 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
927 BASE_ADDR_MATCH_TAG_VAL);
929 link_addr = (void *)sbuf[i].vaddr +
930 HAL_WBM_IDLE_SCATTER_BUF_SIZE;
933 ath11k_hif_write32(ab,
934 HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
935 FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
936 FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
937 ath11k_hif_write32(ab,
938 HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
939 FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
940 reg_scatter_buf_sz * nsbufs));
941 ath11k_hif_write32(ab,
942 HAL_SEQ_WCSS_UMAC_WBM_REG +
943 HAL_WBM_SCATTERED_RING_BASE_LSB,
944 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
945 sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
946 ath11k_hif_write32(ab,
947 HAL_SEQ_WCSS_UMAC_WBM_REG +
948 HAL_WBM_SCATTERED_RING_BASE_MSB,
950 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
951 (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
953 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
954 BASE_ADDR_MATCH_TAG_VAL));
956 /* Setup head and tail pointers for the idle list */
957 ath11k_hif_write32(ab,
958 HAL_SEQ_WCSS_UMAC_WBM_REG +
959 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
960 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
961 sbuf[nsbufs - 1].paddr));
962 ath11k_hif_write32(ab,
963 HAL_SEQ_WCSS_UMAC_WBM_REG +
964 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
966 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
967 ((u64)sbuf[nsbufs - 1].paddr >>
968 HAL_ADDR_MSB_REG_SHIFT)) |
969 FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
971 ath11k_hif_write32(ab,
972 HAL_SEQ_WCSS_UMAC_WBM_REG +
973 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
974 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
977 ath11k_hif_write32(ab,
978 HAL_SEQ_WCSS_UMAC_WBM_REG +
979 HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
980 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
982 ath11k_hif_write32(ab,
983 HAL_SEQ_WCSS_UMAC_WBM_REG +
984 HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
986 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
987 ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
988 FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
990 ath11k_hif_write32(ab,
991 HAL_SEQ_WCSS_UMAC_WBM_REG +
992 HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
995 /* Enable the SRNG */
996 ath11k_hif_write32(ab,
997 HAL_SEQ_WCSS_UMAC_WBM_REG +
998 HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
1001 int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
1002 int ring_num, int mac_id,
1003 struct hal_srng_params *params)
1005 struct ath11k_hal *hal = &ab->hal;
1006 const struct hal_srng_config *srng_config = &hw_srng_config[type];
1007 struct hal_srng *srng;
1013 ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
1017 srng = &hal->srng_list[ring_id];
1019 srng->ring_id = ring_id;
1020 srng->ring_dir = srng_config->ring_dir;
1021 srng->ring_base_paddr = params->ring_base_paddr;
1022 srng->ring_base_vaddr = params->ring_base_vaddr;
1023 srng->entry_size = srng_config->entry_size;
1024 srng->num_entries = params->num_entries;
1025 srng->ring_size = srng->entry_size * srng->num_entries;
1026 srng->intr_batch_cntr_thres_entries =
1027 params->intr_batch_cntr_thres_entries;
1028 srng->intr_timer_thres_us = params->intr_timer_thres_us;
1029 srng->flags = params->flags;
1030 srng->initialized = 1;
1031 spin_lock_init(&srng->lock);
1033 for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
1034 srng->hwreg_base[i] = srng_config->reg_start[i] +
1035 (ring_num * srng_config->reg_size[i]);
1038 memset(srng->ring_base_vaddr, 0,
1039 (srng->entry_size * srng->num_entries) << 2);
1041 /* TODO: Add comments on these swap configurations */
1042 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1043 srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
1044 HAL_SRNG_FLAGS_RING_PTR_SWAP;
1046 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
1048 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
1049 srng->u.src_ring.hp = 0;
1050 srng->u.src_ring.cached_tp = 0;
1051 srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
1052 srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
1053 srng->u.src_ring.low_threshold = params->low_threshold *
1055 if (srng_config->lmac_ring) {
1056 lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1057 srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
1059 srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1061 srng->u.src_ring.hp_addr =
1062 (u32 *)((unsigned long)ab->mem + reg_base);
1065 /* During initialization loop count in all the descriptors
1066 * will be set to zero, and HW will set it to 1 on completing
1067 * descriptor update in first loop, and increments it by 1 on
1068 * subsequent loops (loop count wraps around after reaching
1069 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1070 * loop count in descriptors updated by HW (to be processed
1073 srng->u.dst_ring.loop_cnt = 1;
1074 srng->u.dst_ring.tp = 0;
1075 srng->u.dst_ring.cached_hp = 0;
1076 srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
1077 if (srng_config->lmac_ring) {
1078 /* For LMAC rings, tail pointer updates will be done
1079 * through FW by writing to a shared memory location
1081 lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1082 srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
1084 srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1086 srng->u.dst_ring.tp_addr =
1087 (u32 *)((unsigned long)ab->mem + reg_base +
1088 (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
1092 if (srng_config->lmac_ring)
1095 ath11k_hal_srng_hw_init(ab, srng);
1097 if (type == HAL_CE_DST) {
1098 srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
1099 ath11k_hal_ce_dst_setup(ab, srng, ring_num);
1105 int ath11k_hal_srng_init(struct ath11k_base *ab)
1107 struct ath11k_hal *hal = &ab->hal;
1110 memset(hal, 0, sizeof(*hal));
1112 hal->srng_config = hw_srng_config;
1114 ret = ath11k_hal_alloc_cont_rdp(ab);
1118 ret = ath11k_hal_alloc_cont_wrp(ab);
1120 goto err_free_cont_rdp;
1125 ath11k_hal_free_cont_rdp(ab);
/* Tear down the HAL: release the shared read/write pointer areas. */
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
	ath11k_hal_free_cont_rdp(ab);
	ath11k_hal_free_cont_wrp(ab);
}
1137 void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
1139 struct hal_srng *srng;
1140 struct ath11k_ext_irq_grp *irq_grp;
1141 struct ath11k_ce_pipe *ce_pipe;
1144 ath11k_err(ab, "Last interrupt received for each CE:\n");
1145 for (i = 0; i < CE_COUNT; i++) {
1146 ce_pipe = &ab->ce.ce_pipe[i];
1148 if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
1151 ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
1152 i, ce_pipe->pipe_num,
1153 jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1156 ath11k_err(ab, "\nLast interrupt received for each group:\n");
1157 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
1158 irq_grp = &ab->ext_irq_grp[i];
1159 ath11k_err(ab, "group_id %d %ums before\n",
1161 jiffies_to_msecs(jiffies - irq_grp->timestamp));
1164 for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
1165 srng = &ab->hal.srng_list[i];
1167 if (!srng->initialized)
1170 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
1172 "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
1173 srng->ring_id, srng->u.src_ring.hp,
1174 srng->u.src_ring.reap_hp,
1175 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
1176 srng->u.src_ring.last_tp,
1177 jiffies_to_msecs(jiffies - srng->timestamp));
1178 else if (srng->ring_dir == HAL_SRNG_DIR_DST)
1180 "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
1181 srng->ring_id, srng->u.dst_ring.tp,
1182 *srng->u.dst_ring.hp_addr,
1183 srng->u.dst_ring.cached_hp,
1184 srng->u.dst_ring.last_hp,
1185 jiffies_to_msecs(jiffies - srng->timestamp));