/* drivers/ufs/core/ufs-mcq.c — UFS Multi-Circular Queue (MCQ) support */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
4  *
5  * Authors:
6  *      Asutosh Das <quic_asutoshd@quicinc.com>
7  *      Can Guo <quic_cang@quicinc.com>
8  */
9
10 #include <asm/unaligned.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include "ufshcd-priv.h"
15 #include <linux/delay.h>
16 #include <scsi/scsi_cmnd.h>
17 #include <linux/bitfield.h>
18 #include <linux/iopoll.h>
19
/* Width of the max-queues field extracted from hba->mcq_capabilities. */
#define MAX_QUEUE_SUP GENMASK(7, 0)
/* Lower bounds enforced on the module parameters below. */
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
/* Bit positions within the SQ/CQ attribute registers. */
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16

/* Max Active Commands field within REG_UFS_MCQ_CFG. */
#define MCQ_CFG_MAC_MASK        GENMASK(16, 8)
/* Per-queue stride of the queue configuration register block. */
#define MCQ_QCFG_SIZE           0x40
/* Queue entry size, in 32-bit words, programmed into the attribute regs. */
#define MCQ_ENTRY_SIZE_IN_DWORD 8
/* UCD base address bits in a CQ entry (6:5 reserved, 4:0 SQ ID). */
#define CQE_UCD_BA GENMASK_ULL(63, 7)

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
34
/*
 * Validate the rw_queues module parameter: clamp to
 * [UFS_MCQ_MIN_RW_QUEUES, num_possible_cpus()].
 */
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
        return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
                                     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
        .set = rw_queue_count_set,
        .get = param_get_uint,
};

/* Number of interrupt-driven read/write queues; 0 means "default". */
static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
                 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
50
/*
 * Validate the read_queues module parameter: clamp to
 * [UFS_MCQ_MIN_READ_QUEUES, num_possible_cpus()].
 */
static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
        return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
                                     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
        .set = read_queue_count_set,
        .get = param_get_uint,
};

/* Number of interrupt-driven read-only queues; defaults to 0 (none). */
static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
                 "Number of interrupt driven read queues used for read. Default value is 0");
66
/*
 * Validate the poll_queues module parameter: clamp to
 * [UFS_MCQ_MIN_POLL_QUEUES, num_possible_cpus()].
 */
static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
        return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
                                     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
        .set = poll_queue_count_set,
        .get = param_get_uint,
};

/* Number of interrupt-less polled queues; defaults to 1. */
static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
                 "Number of poll queues used for r/w. Default value is 1");
82
/**
 * ufshcd_mcq_config_mac - Set the #Max Active Cmds.
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than the max_active_cmds to the device at
 * any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
        u32 val;

        /* Read-modify-write only the MAC field of REG_UFS_MCQ_CFG. */
        val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
        val &= ~MCQ_CFG_MAC_MASK;
        val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
        ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
101
102 /**
103  * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
104  * request would be issued.
105  * @hba: per adapter instance
106  * @req: pointer to the request to be issued
107  *
108  * Return: the hardware queue instance on which the request would
109  * be queued.
110  */
111 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
112                                          struct request *req)
113 {
114         u32 utag = blk_mq_unique_tag(req);
115         u32 hwq = blk_mq_unique_tag_to_hwq(utag);
116
117         return &hba->uhq[hwq];
118 }
119
/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue-depth on success; a negative error code on failure.
 *
 * MAC - Max. Active Command of the Host Controller (HC)
 * HC wouldn't send more than this commands to the device.
 * It is mandatory to implement get_hba_mac() to enable MCQ mode.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and ufs device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
        int mac;

        /* Mandatory to implement get_hba_mac() */
        mac = ufshcd_mcq_vops_get_hba_mac(hba);
        if (mac < 0) {
                dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
                return mac;
        }

        /* A zero bqueuedepth would force the returned depth to 0. */
        WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
        /*
         * max. value of bqueuedepth = 256, mac is host dependent.
         * It is mandatory for UFS device to define bQueueDepth if
         * shared queuing architecture is enabled.
         */
        return min_t(int, mac, hba->dev_info.bqueuedepth);
}
151
/*
 * Distribute the controller's hardware queues among the DEFAULT, READ and
 * POLL blk-mq queue types according to the module parameters, and record
 * the totals in hba and the SCSI host.
 *
 * Return: 0 on success; -EOPNOTSUPP if the requested total exceeds what the
 * controller supports.
 */
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
        int i;
        u32 hba_maxq, rem, tot_queues;
        struct Scsi_Host *host = hba->host;

        /* maxq is 0 based value */
        hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

        tot_queues = read_queues + poll_queues + rw_queues;

        if (hba_maxq < tot_queues) {
                dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
                        tot_queues, hba_maxq);
                return -EOPNOTSUPP;
        }

        /* 'rem' tracks how many HW queues are still unassigned. */
        rem = hba_maxq;

        if (rw_queues) {
                hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
                rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
        } else {
                /* No explicit request: aim for one rw queue per CPU. */
                rw_queues = num_possible_cpus();
        }

        if (poll_queues) {
                hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
                rem -= hba->nr_queues[HCTX_TYPE_POLL];
        }

        if (read_queues) {
                hba->nr_queues[HCTX_TYPE_READ] = read_queues;
                rem -= hba->nr_queues[HCTX_TYPE_READ];
        }

        /* Give whatever remains (capped by CPUs) to the default type. */
        if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
                hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
                                                         num_possible_cpus());

        for (i = 0; i < HCTX_MAX_TYPES; i++)
                host->nr_hw_queues += hba->nr_queues[i];

        hba->nr_hw_queues = host->nr_hw_queues;
        return 0;
}
198
199 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
200 {
201         struct ufs_hw_queue *hwq;
202         size_t utrdl_size, cqe_size;
203         int i;
204
205         for (i = 0; i < hba->nr_hw_queues; i++) {
206                 hwq = &hba->uhq[i];
207
208                 utrdl_size = sizeof(struct utp_transfer_req_desc) *
209                              hwq->max_entries;
210                 hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
211                                                          &hwq->sqe_dma_addr,
212                                                          GFP_KERNEL);
213                 if (!hwq->sqe_dma_addr) {
214                         dev_err(hba->dev, "SQE allocation failed\n");
215                         return -ENOMEM;
216                 }
217
218                 cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
219                 hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
220                                                          &hwq->cqe_dma_addr,
221                                                          GFP_KERNEL);
222                 if (!hwq->cqe_dma_addr) {
223                         dev_err(hba->dev, "CQE allocation failed\n");
224                         return -ENOMEM;
225                 }
226         }
227
228         return 0;
229 }
230
231
/* Operation and runtime registers configuration */
/* Address of per-queue config register @r for hw queue @i. */
#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
/*
 * Offset of operation/runtime register group @p for hw queue @i.
 * NOTE: expands a reference to a local variable named 'hba' — only usable
 * inside functions that have one in scope.
 */
#define MCQ_OPR_OFFSET_n(p, i) \
        (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
236
237 static void __iomem *mcq_opr_base(struct ufs_hba *hba,
238                                          enum ufshcd_mcq_opr n, int i)
239 {
240         struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
241
242         return opr->base + opr->stride * i;
243 }
244
245 u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
246 {
247         return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
248 }
249 EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
250
251 void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
252 {
253         writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
254 }
255 EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
256
257 /*
258  * Current MCQ specification doesn't provide a Task Tag or its equivalent in
259  * the Completion Queue Entry. Find the Task Tag using an indirect method.
260  */
261 static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
262 {
263         u64 addr;
264
265         /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
266         BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
267
268         /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
269         addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
270                 hba->ucdl_dma_addr;
271
272         return div_u64(addr, ufshcd_get_ucd_size(hba));
273 }
274
275 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
276                                    struct ufs_hw_queue *hwq)
277 {
278         struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
279         int tag = ufshcd_mcq_get_tag(hba, cqe);
280
281         if (cqe->command_desc_base_addr) {
282                 ufshcd_compl_one_cqe(hba, tag, cqe);
283                 /* After processed the cqe, mark it empty (invalid) entry */
284                 cqe->command_desc_base_addr = 0;
285         }
286 }
287
288 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
289                                     struct ufs_hw_queue *hwq)
290 {
291         unsigned long flags;
292         u32 entries = hwq->max_entries;
293
294         spin_lock_irqsave(&hwq->cq_lock, flags);
295         while (entries > 0) {
296                 ufshcd_mcq_process_cqe(hba, hwq);
297                 ufshcd_mcq_inc_cq_head_slot(hwq);
298                 entries--;
299         }
300
301         ufshcd_mcq_update_cq_tail_slot(hwq);
302         hwq->cq_head_slot = hwq->cq_tail_slot;
303         spin_unlock_irqrestore(&hwq->cq_lock, flags);
304 }
305
306 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
307                                        struct ufs_hw_queue *hwq)
308 {
309         unsigned long completed_reqs = 0;
310         unsigned long flags;
311
312         spin_lock_irqsave(&hwq->cq_lock, flags);
313         ufshcd_mcq_update_cq_tail_slot(hwq);
314         while (!ufshcd_mcq_is_cq_empty(hwq)) {
315                 ufshcd_mcq_process_cqe(hba, hwq);
316                 ufshcd_mcq_inc_cq_head_slot(hwq);
317                 completed_reqs++;
318         }
319
320         if (completed_reqs)
321                 ufshcd_mcq_update_cq_head(hwq);
322         spin_unlock_irqrestore(&hwq->cq_lock, flags);
323
324         return completed_reqs;
325 }
326 EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
327
/**
 * ufshcd_mcq_make_queues_operational - Program every hw queue's SQ/CQ
 * registers and enable the queues.
 * @hba: per adapter instance
 */
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
        struct ufs_hw_queue *hwq;
        u16 qsize;
        int i;

        for (i = 0; i < hba->nr_hw_queues; i++) {
                hwq = &hba->uhq[i];
                hwq->id = i;
                /* Queue size field is in DWORDs and 0-based. */
                qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

                /* Submission Queue Lower Base Address */
                ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
                              MCQ_CFG_n(REG_SQLBA, i));
                /* Submission Queue Upper Base Address */
                ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
                              MCQ_CFG_n(REG_SQUBA, i));
                /* Submission Queue Doorbell Address Offset */
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
                              MCQ_CFG_n(REG_SQDAO, i));
                /* Submission Queue Interrupt Status Address Offset */
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
                              MCQ_CFG_n(REG_SQISAO, i));

                /* Completion Queue Lower Base Address */
                ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
                              MCQ_CFG_n(REG_CQLBA, i));
                /* Completion Queue Upper Base Address */
                ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
                              MCQ_CFG_n(REG_CQUBA, i));
                /* Completion Queue Doorbell Address Offset */
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
                              MCQ_CFG_n(REG_CQDAO, i));
                /* Completion Queue Interrupt Status Address Offset */
                ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
                              MCQ_CFG_n(REG_CQISAO, i));

                /* Save the base addresses for quicker access */
                hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
                hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
                hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
                hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

                /* Reinitializing is needed upon HC reset */
                hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

                /* Enable Tail Entry Push Status interrupt only for non-poll queues */
                if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
                        writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

                /* Completion Queue Enable|Size to Completion Queue Attribute */
                ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
                              MCQ_CFG_n(REG_CQATTR, i));

                /*
                 * Submission Queue Enable|Size|Completion Queue ID to
                 * Submission Queue Attribute
                 */
                ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
                              (i << QUEUE_ID_OFFSET),
                              MCQ_CFG_n(REG_SQATTR, i));
        }
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
392
/**
 * ufshcd_mcq_enable_esi - Enable Event Specific Interrupt generation.
 * @hba: per adapter instance
 */
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
        /* Sets bit 1 of REG_UFS_MEM_CFG — presumably the ESI enable bit;
         * confirm against the UFSHCI MCQ register definitions. */
        ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
                      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
399
/**
 * ufshcd_mcq_enable - Select MCQ mode in the host controller.
 * @hba: per adapter instance
 */
void ufshcd_mcq_enable(struct ufs_hba *hba)
{
        ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
405
/**
 * ufshcd_mcq_config_esi - Program the ESI base address registers.
 * @hba: per adapter instance
 * @msg: MSI message whose address is written to the ESI lower/upper
 *       base address registers
 */
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
        ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
        ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
412
413 int ufshcd_mcq_init(struct ufs_hba *hba)
414 {
415         struct Scsi_Host *host = hba->host;
416         struct ufs_hw_queue *hwq;
417         int ret, i;
418
419         ret = ufshcd_mcq_config_nr_queues(hba);
420         if (ret)
421                 return ret;
422
423         ret = ufshcd_vops_mcq_config_resource(hba);
424         if (ret)
425                 return ret;
426
427         ret = ufshcd_mcq_vops_op_runtime_config(hba);
428         if (ret) {
429                 dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
430                         ret);
431                 return ret;
432         }
433         hba->uhq = devm_kzalloc(hba->dev,
434                                 hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
435                                 GFP_KERNEL);
436         if (!hba->uhq) {
437                 dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
438                 return -ENOMEM;
439         }
440
441         for (i = 0; i < hba->nr_hw_queues; i++) {
442                 hwq = &hba->uhq[i];
443                 hwq->max_entries = hba->nutrs + 1;
444                 spin_lock_init(&hwq->sq_lock);
445                 spin_lock_init(&hwq->cq_lock);
446                 mutex_init(&hwq->sq_mutex);
447         }
448
449         /* The very first HW queue serves device commands */
450         hba->dev_cmd_queue = &hba->uhq[0];
451
452         host->host_tagset = 1;
453         return 0;
454 }
455
456 static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
457 {
458         void __iomem *reg;
459         u32 id = hwq->id, val;
460         int err;
461
462         if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
463                 return -ETIMEDOUT;
464
465         writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
466         reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
467         err = read_poll_timeout(readl, val, val & SQ_STS, 20,
468                                 MCQ_POLL_US, false, reg);
469         if (err)
470                 dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
471                         __func__, id, err);
472         return err;
473 }
474
475 static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
476 {
477         void __iomem *reg;
478         u32 id = hwq->id, val;
479         int err;
480
481         if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
482                 return -ETIMEDOUT;
483
484         writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
485         reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
486         err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
487                                 MCQ_POLL_US, false, reg);
488         if (err)
489                 dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
490                         __func__, id, err);
491         return err;
492 }
493
/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Stops the SQ, issues a Clean-Up request for the command's nexus, polls
 * for its completion, then restarts the SQ.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
        struct scsi_cmnd *cmd = lrbp->cmd;
        struct ufs_hw_queue *hwq;
        void __iomem *reg, *opr_sqd_base;
        u32 nexus, id, val;
        int err;

        /* Broken RTC registers make the stop/clean-up sequence impossible. */
        if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
                return -ETIMEDOUT;

        /* The reserved tag belongs to device commands on dev_cmd_queue. */
        if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
                if (!cmd)
                        return -EINVAL;
                hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
        } else {
                hwq = hba->dev_cmd_queue;
        }

        id = hwq->id;

        mutex_lock(&hwq->sq_mutex);

        /* stop the SQ fetching before working on it */
        err = ufshcd_mcq_sq_stop(hba, hwq);
        if (err)
                goto unlock;

        /* SQCTI = EXT_IID, IID, LUN, Task Tag */
        nexus = lrbp->lun << 8 | task_tag;
        opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
        writel(nexus, opr_sqd_base + REG_SQCTI);

        /* SQRTCy.ICU = 1 */
        writel(SQ_ICU, opr_sqd_base + REG_SQRTC);

        /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
        reg = opr_sqd_base + REG_SQRTS;
        err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
                                MCQ_POLL_US, false, reg);
        if (err)
                /* NOTE(review): logs the RTC error-code field, not 'err' —
                 * confirm the %ld/err mismatch in the message is intended. */
                dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
                        __func__, id, task_tag,
                        FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

        /* Always restart the SQ; a failed restart overrides err. */
        if (ufshcd_mcq_sq_start(hba, hwq))
                err = -ETIMEDOUT;

unlock:
        mutex_unlock(&hwq->sq_mutex);
        return err;
}
555
/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
        /* 0xF is an invalid Command Type, so the HC skips this entry. */
        utrd->header.command_type = 0xf;
}
567
/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
                                  struct ufs_hw_queue *hwq, int task_tag)
{
        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
        struct utp_transfer_req_desc *utrd;
        __le64  cmd_desc_base_addr;
        bool ret = false;
        u64 addr, match;
        u32 sq_head_slot;

        /*
         * With broken RTC registers the SQ cannot be stopped and searched;
         * returning true makes the caller treat the abort as failed.
         */
        if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
                return true;

        mutex_lock(&hwq->sq_mutex);

        /* NOTE(review): sq_stop() failure is ignored here — confirm intent. */
        ufshcd_mcq_sq_stop(hba, hwq);
        sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
        /* head == tail means the SQ is empty: nothing left to nullify. */
        if (sq_head_slot == hwq->sq_tail_slot)
                goto out;

        /* The command's UCD base address uniquely identifies its SQE. */
        cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
        addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

        /* Scan from the HW head to the SW tail for a matching UCD address. */
        while (sq_head_slot != hwq->sq_tail_slot) {
                utrd = hwq->sqe_base_addr +
                                sq_head_slot * sizeof(struct utp_transfer_req_desc);
                match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
                if (addr == match) {
                        ufshcd_mcq_nullify_sqe(utrd);
                        ret = true;
                        goto out;
                }

                /* Advance circularly around the ring. */
                sq_head_slot++;
                if (sq_head_slot == hwq->max_entries)
                        sq_head_slot = 0;
        }

out:
        ufshcd_mcq_sq_start(hba, hwq);
        mutex_unlock(&hwq->sq_mutex);
        return ret;
}
623
624 /**
625  * ufshcd_mcq_abort - Abort the command in MCQ.
626  * @cmd: The command to be aborted.
627  *
628  * Return: SUCCESS or FAILED error codes
629  */
630 int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
631 {
632         struct Scsi_Host *host = cmd->device->host;
633         struct ufs_hba *hba = shost_priv(host);
634         int tag = scsi_cmd_to_rq(cmd)->tag;
635         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
636         struct ufs_hw_queue *hwq;
637         unsigned long flags;
638         int err = FAILED;
639
640         if (!ufshcd_cmd_inflight(lrbp->cmd)) {
641                 dev_err(hba->dev,
642                         "%s: skip abort. cmd at tag %d already completed.\n",
643                         __func__, tag);
644                 goto out;
645         }
646
647         /* Skip task abort in case previous aborts failed and report failure */
648         if (lrbp->req_abort_skip) {
649                 dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
650                         __func__, tag);
651                 goto out;
652         }
653
654         hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
655
656         if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
657                 /*
658                  * Failure. The command should not be "stuck" in SQ for
659                  * a long time which resulted in command being aborted.
660                  */
661                 dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
662                         __func__, hwq->id, tag);
663                 goto out;
664         }
665
666         /*
667          * The command is not in the submission queue, and it is not
668          * in the completion queue either. Query the device to see if
669          * the command is being processed in the device.
670          */
671         if (ufshcd_try_to_abort_task(hba, tag)) {
672                 dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
673                 lrbp->req_abort_skip = true;
674                 goto out;
675         }
676
677         err = SUCCESS;
678         spin_lock_irqsave(&hwq->cq_lock, flags);
679         if (ufshcd_cmd_inflight(lrbp->cmd))
680                 ufshcd_release_scsi_cmd(hba, lrbp);
681         spin_unlock_irqrestore(&hwq->cq_lock, flags);
682
683 out:
684         return err;
685 }