 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>
#include "desc_constr.h"
#define PREHDR_RSLS_SHIFT	31
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
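/*
 * Worked example (assuming 8 portal-owning CPUs): init_cgr() below programs
 * the congestion state threshold as num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU,
 * i.e. 8 * 256 = 2048 in-flight frames for the whole congestion group.
 */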
#define CAAM_QI_ENQUEUE_RETRIES	10000
#define CAAM_NAPI_WEIGHT	63
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
	struct napi_struct irqtask;
	struct qman_portal *p;
 * caam_qi_pcpu_priv - percpu private data structure used to maintain the list
 *                     of pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 * @qi_pdev: platform device for QI backend
	struct platform_device *qi_pdev;
static struct caam_qi_priv qipriv ____cacheline_aligned;
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
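/*
 * A minimal sketch of how a frontend is expected to consult this flag before
 * enqueuing new work (hypothetical caller code, for illustration only):
 *
 *	if (unlikely(caam_congested))
 *		return -EAGAIN;		// back-pressure: let the caller retry later
 */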
#ifdef CONFIG_DEBUG_FS
 * This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
 * each time the congestion callback is called with congested == true.
static u64 times_congested;
 * CPU from which the module was initialised. This is required because the
 * QMan driver requires CGRs to be removed from the same CPU on which they
 * were originally allocated.
static int mod_init_cpu;
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 * being processed. This could be added by the dpaa-ethernet driver.
 * This would pose a problem for userspace application processing which
 * cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
static struct kmem_cache *qi_cache;
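/*
 * A minimal usage sketch (hypothetical frontend code): per-request state is
 * carved out of this memcache on the hotpath and returned from the frontend's
 * done callback:
 *
 *	void *edesc = qi_cache_alloc(GFP_ATOMIC);
 *
 *	if (unlikely(!edesc))
 *		return -ENOMEM;
 *	...
 *	qi_cache_free(edesc);
 */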
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
	qm_fd_addr_set64(&fd, addr);
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
	dev_err(qidev, "qman_enqueue failed: %d\n", ret);
EXPORT_SYMBOL(caam_qi_enqueue);
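/*
 * A minimal enqueue sketch (hypothetical frontend code, assuming the
 * caam_drv_req layout declared in qi.h): the caller prepares the compound
 * frame S/G pair in fd_sgt[] (fd_sgt[1] is the input entry whose length seeds
 * the FD above), sets a completion callback and handles enqueue failure:
 *
 *	req->drv_ctx = drv_ctx;
 *	req->cbk = my_done_cb;		// invoked from the response FQ path
 *	ret = caam_qi_enqueue(qidev, req);
 *	if (ret)
 *		...			// undo mappings, free per-request state
 */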
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
			"Can't find original request for CAAM response\n");
	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
	drv_req->cbk(drv_req, -EIO);
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;
	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);
	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;
	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
	qman_destroy_fq(req_fq);
static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		struct qman_portal *p;
		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);
static int kill_fq(struct device *qidev, struct qman_fq *fq)
	ret = qman_retire_fq(fq, &flags);
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
	/* Async FQ retirement condition */
		/* Retry till the FQ gets into the retired state */
		} while (fq->state != qman_fq_state_retired);
		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
	ret = qman_oos_fq(fq);
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
static int empty_caam_fq(struct qman_fq *fq)
	struct qm_mcr_queryfq_np np;
	/* Wait till the older CAAM FQ gets empty */
		ret = qman_query_fq_np(fq, &np);
		if (!qm_mcr_np_get(&np, frm_cnt))
	 * Give extra time for pending jobs from this FQ in holding tanks
	 * to get processed.
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;
	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;
	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;
	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq);
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
		/* We can revert to the old FQ */
		drv_ctx->req_fq = old_fq;
		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
		 * We can kill the new FQ and revert to the old one.
		 * Since the descriptor has already been updated, treat this
		 * as a success.
		drv_ctx->req_fq = old_fq;
		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
EXPORT_SYMBOL(caam_drv_ctx_update);
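/*
 * A minimal rekey sketch (hypothetical frontend code): after rebuilding its
 * shared descriptor (e.g. in a setkey handler), a frontend refreshes the live
 * session context in place:
 *
 *	ret = caam_drv_ctx_update(ctx->drv_ctx, ctx->sh_desc);
 *	if (ret)
 *		dev_err(dev, "driver context update failed: %d\n", ret);
 */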
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();
	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
		return ERR_PTR(-EINVAL);
	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
		return ERR_PTR(-ENOMEM);
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		return ERR_PTR(-ENOMEM);
	drv_ctx->context_a = hwdesc;
	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);
		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		put_cpu_var(last_cpu);
	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		return ERR_PTR(-ENOMEM);
	drv_ctx->qidev = qidev;
EXPORT_SYMBOL(caam_drv_ctx_init);
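/*
 * A minimal session-setup sketch (hypothetical frontend code): a driver
 * context binds one shared descriptor to a request FQ on a portal-owning CPU;
 * the CPU passed by reference may be rewritten by the round-robin selection
 * above:
 *
 *	int cpu = smp_processor_id();
 *	struct caam_drv_ctx *drv_ctx;
 *
 *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *	if (IS_ERR_OR_NULL(drv_ctx))
 *		return PTR_ERR(drv_ctx);
 */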
void *qi_cache_alloc(gfp_t flags)
	return kmem_cache_alloc(qi_cache, flags);
EXPORT_SYMBOL(qi_cache_alloc);
void qi_cache_free(void *obj)
	kmem_cache_free(qi_cache, obj);
EXPORT_SYMBOL(qi_cache_free);
static int caam_qi_poll(struct napi_struct *napi, int budget)
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
	int cleaned = qman_p_poll_dqrr(np->p, budget);
	if (cleaned < budget) {
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
	if (IS_ERR_OR_NULL(drv_ctx))
	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
EXPORT_SYMBOL(caam_drv_ctx_rel);
int caam_qi_shutdown(struct device *qidev)
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;
	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;
		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);
		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	 * The QMan driver requires CGRs to be deleted from the same CPU on
	 * which they were instantiated. Hence we make module removal run on
	 * the same CPU from which the module was originally inserted.
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
	ret = qman_delete_cgr(&priv->cgr);
		dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
		qman_release_cgrid(priv->cgr.cgrid);
	kmem_cache_destroy(qi_cache);
	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);
	platform_device_unregister(priv->qi_pdev);
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
	caam_congested = congested;
#ifdef CONFIG_DEBUG_FS
		pr_debug_ratelimited("CAAM entered congestion\n");
		pr_debug_ratelimited("CAAM exited congestion\n");
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
	 * In case of threaded ISR, for RT kernels in_irq() does not return an
	 * appropriate value, so use in_serving_softirq() to distinguish
	 * between softirq and irq contexts.
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		napi_schedule(&np->irqtask);
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;
	status = be32_to_cpu(fd->status);
	if (unlikely(status))
		dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
	struct qm_mcc_initfq opts;
	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
		dev_err(qidev, "Rsp FQ create failed\n");
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
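	/*
	 * Stash one cacheline of frame data and one of FQ context, with no
	 * annotation stashing (assuming the (as, ds, cs) argument order of
	 * qm_fqd_set_stashing() in soc/fsl/qman.h).
	 */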
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
		dev_err(qidev, "Rsp FQ init failed\n");
	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
static int init_cgr(struct device *qidev)
	struct qm_mcc_initcgr opts;
	const u64 cpus = *(u64 *)qman_affine_cpus();
	const int num_cpus = hweight64(cpus);
	const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
static int alloc_rsp_fqs(struct device *qidev)
	const cpumask_t *cpus = qman_affine_cpus();
	/* Now create response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
static void free_rsp_fqs(void)
	const cpumask_t *cpus = qman_affine_cpus();
	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
int caam_qi_init(struct platform_device *caam_pdev)
	struct platform_device *qi_pdev;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = current->cpus_allowed;
	static struct platform_device_info qi_pdev_info = {
		.id = PLATFORM_DEVID_NONE
	 * QMan requires CGRs to be removed from the same CPU+portal on which
	 * they were originally allocated. Hence we need to note down the
	 * initialisation CPU and use the same CPU for module exit.
	 * We select the first CPU from the list of portal-owning CPUs and
	 * pin module init to it.
	mod_init_cpu = cpumask_first(cpus);
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
	qi_pdev_info.parent = ctrldev;
	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
	qi_pdev = platform_device_register_full(&qi_pdev_info);
		return PTR_ERR(qi_pdev);
	set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = &qi_pdev->dev;
	qipriv.qi_pdev = qi_pdev;
	dev_set_drvdata(qidev, &qipriv);
	/* Initialize the congestion detection */
	err = init_cgr(qidev);
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		platform_device_unregister(qi_pdev);
	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		platform_device_unregister(qi_pdev);
	 * Enable the NAPI contexts on each core that has an affine portal.
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;
		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);
		netif_napi_add(net_dev, irqtask, caam_qi_poll,
		napi_enable(irqtask);
	/* Hook up QI device to parent controlling caam device */
	ctrlpriv->qidev = qidev;
	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
		dev_err(qidev, "Can't allocate CAAM cache\n");
		platform_device_unregister(qi_pdev);
	/* Done with the CGRs; restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
			    &times_congested, &caam_fops_u64_ro);
	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");