drivers/ufs/core/ufshcd.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller driver Core
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/pm_opp.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/sched/clock.h>
26 #include <linux/iopoll.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_dbg.h>
29 #include <scsi/scsi_driver.h>
30 #include <scsi/scsi_eh.h>
31 #include "ufshcd-priv.h"
32 #include <ufs/ufs_quirks.h>
33 #include <ufs/unipro.h>
34 #include "ufs-sysfs.h"
35 #include "ufs-debugfs.h"
36 #include "ufs-fault-injection.h"
37 #include "ufs_bsg.h"
38 #include "ufshcd-crypto.h"
39 #include <asm/unaligned.h>
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/ufs.h>
43
44 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
45                                  UTP_TASK_REQ_COMPL |\
46                                  UFSHCD_ERROR_MASK)
47
48 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
49                                  UFSHCD_ERROR_MASK |\
50                                  MCQ_CQ_EVENT_STATUS)
51
52
53 /* UIC command timeout, unit: ms */
54 #define UIC_CMD_TIMEOUT 500
55
56 /* NOP OUT retries waiting for NOP IN response */
57 #define NOP_OUT_RETRIES    10
58 /* Timeout after 50 msecs if NOP OUT hangs without response */
59 #define NOP_OUT_TIMEOUT    50 /* msecs */
60
61 /* Query request retries */
62 #define QUERY_REQ_RETRIES 3
63 /* Query request timeout */
64 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
65
66 /* Advanced RPMB request timeout */
67 #define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */
68
69 /* Task management command timeout */
70 #define TM_CMD_TIMEOUT  100 /* msecs */
71
72 /* maximum number of retries for a general UIC command  */
73 #define UFS_UIC_COMMAND_RETRIES 3
74
75 /* maximum number of link-startup retries */
76 #define DME_LINKSTARTUP_RETRIES 3
77
78 /* maximum number of reset retries before giving up */
79 #define MAX_HOST_RESET_RETRIES 5
80
81 /* Maximum number of error handler retries before giving up */
82 #define MAX_ERR_HANDLER_RETRIES 5
83
84 /* Expose the flag value from utp_upiu_query.value */
85 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
86
87 /* Interrupt aggregation default timeout, unit: 40us */
88 #define INT_AGGR_DEF_TO 0x02
89
90 /* default delay of autosuspend: 2000 ms */
91 #define RPM_AUTOSUSPEND_DELAY_MS 2000
92
93 /* Default delay of RPM device flush delayed work */
94 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
95
96 /* Default value of wait time before gating device ref clock */
97 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
98
99 /* Polling time to wait for fDeviceInit */
100 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
101
102 /* Default RTC update every 10 seconds */
103 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
104
105 /* UFSHCI 4.0 compliant host controllers support this mode. */
106 static bool use_mcq_mode = true;
107
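/* MCQ is used only if the controller supports it and use_mcq_mode allows it. */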
108 static bool is_mcq_supported(struct ufs_hba *hba)
109 {
110         return hba->mcq_sup && use_mcq_mode;
111 }
112
113 module_param(use_mcq_mode, bool, 0644);
114 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
115
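/*
 * Enable or disable a voltage regulator; evaluates to the return value of
 * ufshcd_enable_vreg() or ufshcd_disable_vreg(), depending on @_on.
 */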
116 #define ufshcd_toggle_vreg(_dev, _vreg, _on)                            \
117         ({                                                              \
118                 int _ret;                                               \
119                 if (_on)                                                \
120                         _ret = ufshcd_enable_vreg(_dev, _vreg);         \
121                 else                                                    \
122                         _ret = ufshcd_disable_vreg(_dev, _vreg);        \
123                 _ret;                                                   \
124         })
125
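/*
 * Dump @len bytes at @buf to the kernel log at KERN_ERR level; byte offsets
 * are printed only for buffers larger than four bytes.
 */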
126 #define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
127         size_t __len = (len);                                            \
128         print_hex_dump(KERN_ERR, prefix_str,                             \
129                        __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
130                        16, 4, buf, __len, false);                        \
131 } while (0)
132
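/**
 * ufshcd_dump_regs - dump a range of UFSHCI registers to the kernel log
 * @hba: per-adapter instance
 * @offset: byte offset of the first register to dump
 * @len: number of bytes to dump; must be a multiple of four
 * @prefix: prefix string printed in front of the hex dump
 *
 * The UIC error code registers are skipped when dumping from offset 0.
 *
 * Return: 0 on success, -EINVAL for misaligned arguments, -ENOMEM if the
 * temporary buffer cannot be allocated.
 */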
133 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
134                      const char *prefix)
135 {
136         u32 *regs;
137         size_t pos;
138
139         if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
140                 return -EINVAL;
141
142         regs = kzalloc(len, GFP_ATOMIC);
143         if (!regs)
144                 return -ENOMEM;
145
146         for (pos = 0; pos < len; pos += 4) {
147                 if (offset == 0 &&
148                     pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
149                     pos <= REG_UIC_ERROR_CODE_DME)
150                         continue;
151                 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
152         }
153
154         ufshcd_hex_dump(prefix, regs, len);
155         kfree(regs);
156
157         return 0;
158 }
159 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
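/*
 * Illustrative use (mirroring ufshcd_print_evt_hist() below): dump the whole
 * UFSHCI register space while collecting error information:
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 */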
160
161 enum {
162         UFSHCD_MAX_CHANNEL      = 0,
163         UFSHCD_MAX_ID           = 1,
164         UFSHCD_CMD_PER_LUN      = 32 - UFSHCD_NUM_RESERVED,
165         UFSHCD_CAN_QUEUE        = 32 - UFSHCD_NUM_RESERVED,
166 };
167
168 static const char *const ufshcd_state_name[] = {
169         [UFSHCD_STATE_RESET]                    = "reset",
170         [UFSHCD_STATE_OPERATIONAL]              = "operational",
171         [UFSHCD_STATE_ERROR]                    = "error",
172         [UFSHCD_STATE_EH_SCHEDULED_FATAL]       = "eh_fatal",
173         [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]   = "eh_non_fatal",
174 };
175
176 /* UFSHCD error handling flags */
177 enum {
178         UFSHCD_EH_IN_PROGRESS = (1 << 0),
179 };
180
181 /* UFSHCD UIC layer error flags */
182 enum {
183         UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
184         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
185         UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
186         UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
187         UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
188         UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
189         UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
190 };
191
192 #define ufshcd_set_eh_in_progress(h) \
193         ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
194 #define ufshcd_eh_in_progress(h) \
195         ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
196 #define ufshcd_clear_eh_in_progress(h) \
197         ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
198
199 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
200         [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
201         [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
202         [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
203         [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
204         [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
205         [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
206         /*
207          * For DeepSleep, the link is first put in hibern8 and then off.
208          * Leaving the link in hibern8 is not supported.
209          */
210         [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
211 };
212
213 static inline enum ufs_dev_pwr_mode
214 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
215 {
216         return ufs_pm_lvl_states[lvl].dev_state;
217 }
218
219 static inline enum uic_link_state
220 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
221 {
222         return ufs_pm_lvl_states[lvl].link_state;
223 }
224
225 static inline enum ufs_pm_level
226 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
227                                         enum uic_link_state link_state)
228 {
229         enum ufs_pm_level lvl;
230
231         for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
232                 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
233                         (ufs_pm_lvl_states[lvl].link_state == link_state))
234                         return lvl;
235         }
236
237         /* if no match is found, return level 0 */
238         return UFS_PM_LVL_0;
239 }
240
241 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
242 {
243         return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
244                 hba->active_uic_cmd || hba->uic_async_done);
245 }
246
247 static const struct ufs_dev_quirk ufs_fixups[] = {
248         /* UFS cards deviations table */
249         { .wmanufacturerid = UFS_VENDOR_MICRON,
250           .model = UFS_ANY_MODEL,
251           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
252         { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
253           .model = UFS_ANY_MODEL,
254           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
255                    UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
256                    UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
257         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
258           .model = UFS_ANY_MODEL,
259           .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
260         { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
261           .model = "hB8aL1" /*H28U62301AMR*/,
262           .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
263         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
264           .model = UFS_ANY_MODEL,
265           .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
266         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
267           .model = "THGLF2G9C8KBADG",
268           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
269         { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
270           .model = "THGLF2G9D8KBADG",
271           .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
272         {}
273 };
274
275 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
276 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
277 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
278 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
279 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
280 static void ufshcd_hba_exit(struct ufs_hba *hba);
281 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
282 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
283 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
284 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
285 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
286 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
287 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
288                              bool scale_up);
289 static irqreturn_t ufshcd_intr(int irq, void *__hba);
290 static int ufshcd_change_power_mode(struct ufs_hba *hba,
291                              struct ufs_pa_layer_attr *pwr_mode);
292 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
293 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
294 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
295                                          struct ufs_vreg *vreg);
296 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
297                                                  bool enable);
298 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
299 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
300
301 void ufshcd_enable_irq(struct ufs_hba *hba)
302 {
303         if (!hba->is_irq_enabled) {
304                 enable_irq(hba->irq);
305                 hba->is_irq_enabled = true;
306         }
307 }
308 EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
309
310 void ufshcd_disable_irq(struct ufs_hba *hba)
311 {
312         if (hba->is_irq_enabled) {
313                 disable_irq(hba->irq);
314                 hba->is_irq_enabled = false;
315         }
316 }
317 EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
318
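/*
 * Enable WriteBooster, keep the WB buffer flush enabled while in hibern8,
 * and, if permitted, enable explicit WB buffer flushing.
 */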
319 static void ufshcd_configure_wb(struct ufs_hba *hba)
320 {
321         if (!ufshcd_is_wb_allowed(hba))
322                 return;
323
324         ufshcd_wb_toggle(hba, true);
325
326         ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
327
328         if (ufshcd_is_wb_buf_flush_allowed(hba))
329                 ufshcd_wb_toggle_buf_flush(hba, true);
330 }
331
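/*
 * ufshcd_scsi_block_requests() and ufshcd_scsi_unblock_requests() are
 * reference counted: SCSI request processing is blocked by the first caller
 * and resumed only when the last blocker calls unblock.
 */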
332 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
333 {
334         if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
335                 scsi_unblock_requests(hba->host);
336 }
337
338 static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
339 {
340         if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
341                 scsi_block_requests(hba->host);
342 }
343
344 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
345                                       enum ufs_trace_str_t str_t)
346 {
347         struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
348         struct utp_upiu_header *header;
349
350         if (!trace_ufshcd_upiu_enabled())
351                 return;
352
353         if (str_t == UFS_CMD_SEND)
354                 header = &rq->header;
355         else
356                 header = &hba->lrb[tag].ucd_rsp_ptr->header;
357
358         trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
359                           UFS_TSF_CDB);
360 }
361
362 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
363                                         enum ufs_trace_str_t str_t,
364                                         struct utp_upiu_req *rq_rsp)
365 {
366         if (!trace_ufshcd_upiu_enabled())
367                 return;
368
369         trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
370                           &rq_rsp->qr, UFS_TSF_OSF);
371 }
372
373 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
374                                      enum ufs_trace_str_t str_t)
375 {
376         struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
377
378         if (!trace_ufshcd_upiu_enabled())
379                 return;
380
381         if (str_t == UFS_TM_SEND)
382                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
383                                   &descp->upiu_req.req_header,
384                                   &descp->upiu_req.input_param1,
385                                   UFS_TSF_TM_INPUT);
386         else
387                 trace_ufshcd_upiu(dev_name(hba->dev), str_t,
388                                   &descp->upiu_rsp.rsp_header,
389                                   &descp->upiu_rsp.output_param1,
390                                   UFS_TSF_TM_OUTPUT);
391 }
392
393 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
394                                          const struct uic_command *ucmd,
395                                          enum ufs_trace_str_t str_t)
396 {
397         u32 cmd;
398
399         if (!trace_ufshcd_uic_command_enabled())
400                 return;
401
402         if (str_t == UFS_CMD_SEND)
403                 cmd = ucmd->command;
404         else
405                 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
406
407         trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
408                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
409                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
410                                  ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
411 }
412
413 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
414                                      enum ufs_trace_str_t str_t)
415 {
416         u64 lba = 0;
417         u8 opcode = 0, group_id = 0;
418         u32 doorbell = 0;
419         u32 intr;
420         int hwq_id = -1;
421         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
422         struct scsi_cmnd *cmd = lrbp->cmd;
423         struct request *rq = scsi_cmd_to_rq(cmd);
424         int transfer_len = -1;
425
426         if (!cmd)
427                 return;
428
429         /* trace UPIU also */
430         ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
431         if (!trace_ufshcd_command_enabled())
432                 return;
433
434         opcode = cmd->cmnd[0];
435
436         if (opcode == READ_10 || opcode == WRITE_10) {
437                 /*
438                  * Currently we only fully trace read(10) and write(10) commands
439                  */
440                 transfer_len =
441                        be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
442                 lba = scsi_get_lba(cmd);
443                 if (opcode == WRITE_10)
444                         group_id = lrbp->cmd->cmnd[6];
445         } else if (opcode == UNMAP) {
446                 /*
447                  * The number of Bytes to be unmapped beginning with the lba.
448                  */
449                 transfer_len = blk_rq_bytes(rq);
450                 lba = scsi_get_lba(cmd);
451         }
452
453         intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
454
455         if (is_mcq_enabled(hba)) {
456                 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
457
458                 hwq_id = hwq->id;
459         } else {
460                 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
461         }
462         trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
463                              transfer_len, intr, lba, opcode, group_id);
464 }
465
466 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
467 {
468         struct ufs_clk_info *clki;
469         struct list_head *head = &hba->clk_list_head;
470
471         if (list_empty(head))
472                 return;
473
474         list_for_each_entry(clki, head, list) {
475                 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
476                                 clki->max_freq)
477                         dev_err(hba->dev, "clk: %s, rate: %u\n",
478                                         clki->name, clki->curr_freq);
479         }
480 }
481
482 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
483                              const char *err_name)
484 {
485         int i;
486         bool found = false;
487         const struct ufs_event_hist *e;
488
489         if (id >= UFS_EVT_CNT)
490                 return;
491
492         e = &hba->ufs_stats.event[id];
493
494         for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
495                 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
496
497                 if (e->tstamp[p] == 0)
498                         continue;
499                 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
500                         e->val[p], div_u64(e->tstamp[p], 1000));
501                 found = true;
502         }
503
504         if (!found)
505                 dev_err(hba->dev, "No record of %s\n", err_name);
506         else
507                 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
508 }
509
510 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
511 {
512         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
513
514         ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
515         ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
516         ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
517         ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
518         ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
519         ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
520                          "auto_hibern8_err");
521         ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
522         ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
523                          "link_startup_fail");
524         ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
525         ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
526                          "suspend_fail");
527         ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
528         ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
529                          "wlun suspend_fail");
530         ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
531         ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
532         ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
533
534         ufshcd_vops_dbg_register_dump(hba);
535 }
536
537 static
538 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
539 {
540         const struct ufshcd_lrb *lrbp;
541         int prdt_length;
542
543         lrbp = &hba->lrb[tag];
544
545         dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
546                         tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
547         dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
548                         tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
549         dev_err(hba->dev,
550                 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
551                 tag, (u64)lrbp->utrd_dma_addr);
552
553         ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
554                         sizeof(struct utp_transfer_req_desc));
555         dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
556                 (u64)lrbp->ucd_req_dma_addr);
557         ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
558                         sizeof(struct utp_upiu_req));
559         dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
560                 (u64)lrbp->ucd_rsp_dma_addr);
561         ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
562                         sizeof(struct utp_upiu_rsp));
563
564         prdt_length = le16_to_cpu(
565                 lrbp->utr_descriptor_ptr->prd_table_length);
566         if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
567                 prdt_length /= ufshcd_sg_entry_size(hba);
568
569         dev_err(hba->dev,
570                 "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
571                 tag, prdt_length,
572                 (u64)lrbp->ucd_prdt_dma_addr);
573
574         if (pr_prdt)
575                 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
576                         ufshcd_sg_entry_size(hba) * prdt_length);
577 }
578
579 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
580 {
581         struct scsi_device *sdev = req->q->queuedata;
582         struct Scsi_Host *shost = sdev->host;
583         struct ufs_hba *hba = shost_priv(shost);
584
585         ufshcd_print_tr(hba, req->tag, *(bool *)priv);
586
587         return true;
588 }
589
590 /**
591  * ufshcd_print_trs_all - print trs for all started requests.
592  * @hba: per-adapter instance.
593  * @pr_prdt: need to print prdt or not.
594  */
595 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
596 {
597         blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
598 }
599
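/* Dump the UTP task management request descriptors selected by @bitmap. */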
600 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
601 {
602         int tag;
603
604         for_each_set_bit(tag, &bitmap, hba->nutmrs) {
605                 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
606
607                 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
608                 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
609         }
610 }
611
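/* Dump the driver's view of the host controller and device state. */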
612 static void ufshcd_print_host_state(struct ufs_hba *hba)
613 {
614         const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
615
616         dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
617         dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
618                 hba->outstanding_reqs, hba->outstanding_tasks);
619         dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
620                 hba->saved_err, hba->saved_uic_err);
621         dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
622                 hba->curr_dev_pwr_mode, hba->uic_link_state);
623         dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
624                 hba->pm_op_in_progress, hba->is_sys_suspended);
625         dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
626                 hba->auto_bkops_enabled, hba->host->host_self_blocked);
627         dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
628         dev_err(hba->dev,
629                 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
630                 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
631                 hba->ufs_stats.hibern8_exit_cnt);
632         dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
633                 div_u64(hba->ufs_stats.last_intr_ts, 1000),
634                 hba->ufs_stats.last_intr_status);
635         dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
636                 hba->eh_flags, hba->req_abort_count);
637         dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
638                 hba->ufs_version, hba->capabilities, hba->caps);
639         dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
640                 hba->dev_quirks);
641         if (sdev_ufs)
642                 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
643                         sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
644
645         ufshcd_print_clk_freqs(hba);
646 }
647
648 /**
649  * ufshcd_print_pwr_info - print power params as saved in hba
650  * power info
651  * @hba: per-adapter instance
652  */
653 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
654 {
655         static const char * const names[] = {
656                 "INVALID MODE",
657                 "FAST MODE",
658                 "SLOW_MODE",
659                 "INVALID MODE",
660                 "FASTAUTO_MODE",
661                 "SLOWAUTO_MODE",
662                 "INVALID MODE",
663         };
664
665         /*
666          * Use dev_dbg() to avoid messages during runtime PM; otherwise user
667          * space writing those messages back to storage would trigger runtime
668          * resume, generating more messages, and so on without end.
669          */
670         dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
671                  __func__,
672                  hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
673                  hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
674                  names[hba->pwr_info.pwr_rx],
675                  names[hba->pwr_info.pwr_tx],
676                  hba->pwr_info.hs_rate);
677 }
678
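/*
 * Reset the UFS device through the variant hook. On success the device is
 * marked active and the cached WriteBooster and RTC baseline state is
 * cleared. The result is recorded in the event history unless the variant
 * does not implement a device reset.
 */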
679 static void ufshcd_device_reset(struct ufs_hba *hba)
680 {
681         int err;
682
683         err = ufshcd_vops_device_reset(hba);
684
685         if (!err) {
686                 ufshcd_set_ufs_dev_active(hba);
687                 if (ufshcd_is_wb_allowed(hba)) {
688                         hba->dev_info.wb_enabled = false;
689                         hba->dev_info.wb_buf_flush_enabled = false;
690                 }
691                 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
692                         hba->dev_info.rtc_time_baseline = 0;
693         }
694         if (err != -EOPNOTSUPP)
695                 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
696 }
697
698 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
699 {
700         if (!us)
701                 return;
702
703         if (us < 10)
704                 udelay(us);
705         else
706                 usleep_range(us, us + tolerance);
707 }
708 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
709
710 /**
711  * ufshcd_wait_for_register - wait for register value to change
712  * @hba: per-adapter interface
713  * @reg: mmio register offset
714  * @mask: mask to apply to the read register value
715  * @val: value to wait for
716  * @interval_us: polling interval in microseconds
717  * @timeout_ms: timeout in milliseconds
718  *
719  * Return: -ETIMEDOUT on error, zero on success.
720  */
721 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
722                                 u32 val, unsigned long interval_us,
723                                 unsigned long timeout_ms)
724 {
725         int err = 0;
726         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
727
728         /* ignore bits that we don't intend to wait on */
729         val = val & mask;
730
731         while ((ufshcd_readl(hba, reg) & mask) != val) {
732                 usleep_range(interval_us, interval_us + 50);
733                 if (time_after(jiffies, timeout)) {
734                         if ((ufshcd_readl(hba, reg) & mask) != val)
735                                 err = -ETIMEDOUT;
736                         break;
737                 }
738         }
739
740         return err;
741 }
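/*
 * Illustrative (hypothetical) use: poll until the host controller is no
 * longer active, giving up after 10 ms:
 *
 *	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
 *				       CONTROLLER_ENABLE, 0, 100, 10);
 */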
742
743 /**
744  * ufshcd_get_intr_mask - Get the interrupt bit mask
745  * @hba: Pointer to adapter instance
746  *
747  * Return: interrupt bit mask per version
748  */
749 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
750 {
751         if (hba->ufs_version == ufshci_version(1, 0))
752                 return INTERRUPT_MASK_ALL_VER_10;
753         if (hba->ufs_version <= ufshci_version(2, 0))
754                 return INTERRUPT_MASK_ALL_VER_11;
755
756         return INTERRUPT_MASK_ALL_VER_21;
757 }
758
759 /**
760  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
761  * @hba: Pointer to adapter instance
762  *
763  * Return: UFSHCI version supported by the controller
764  */
765 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
766 {
767         u32 ufshci_ver;
768
769         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
770                 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
771         else
772                 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
773
774         /*
775          * UFSHCI v1.x uses a different version scheme. In order to allow
776          * comparisons with the ufshci_version() function, convert it to the
777          * same scheme as UFS 2.0+.
778          */
779         if (ufshci_ver & 0x00010000)
780                 return ufshci_version(1, ufshci_ver & 0x00000100);
781
782         return ufshci_ver;
783 }
784
785 /**
786  * ufshcd_is_device_present - Check if any device is connected to
787  *                            the host controller
788  * @hba: pointer to adapter instance
789  *
790  * Return: true if device present, false if no device detected
791  */
792 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
793 {
794         return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
795 }
796
797 /**
798  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
799  * @lrbp: pointer to local command reference block
800  * @cqe: pointer to the completion queue entry
801  *
802  * This function is used to get the OCS field from UTRD
803  *
804  * Return: the OCS field in the UTRD.
805  */
806 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
807                                       struct cq_entry *cqe)
808 {
809         if (cqe)
810                 return le32_to_cpu(cqe->status) & MASK_OCS;
811
812         return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
813 }
814
815 /**
816  * ufshcd_utrl_clear() - Clear requests from the controller request list.
817  * @hba: per adapter instance
818  * @mask: mask with one bit set for each request to be cleared
819  */
820 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
821 {
822         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
823                 mask = ~mask;
824         /*
825          * From the UFSHCI specification: "UTP Transfer Request List CLear
826          * Register (UTRLCLR): This field is bit significant. Each bit
827          * corresponds to a slot in the UTP Transfer Request List, where bit 0
828          * corresponds to request slot 0. A bit in this field is set to ‘0’
829          * by host software to indicate to the host controller that a transfer
830          * request slot is cleared. The host controller
831          * shall free up any resources associated to the request slot
832          * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
833          * host software indicates no change to request slots by setting the
834          * associated bits in this field to ‘1’. Bits in this field shall only
835          * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
836          */
837         ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
838 }
839
840 /**
841  * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
842  * @hba: per adapter instance
843  * @pos: position of the bit to be cleared
844  */
845 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
846 {
847         if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
848                 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
849         else
850                 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
851 }
852
853 /**
854  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
855  * @reg: Register value of host controller status
856  *
857  * Return: 0 on success; a positive value if failed.
858  */
859 static inline int ufshcd_get_lists_status(u32 reg)
860 {
861         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
862 }
863
864 /**
865  * ufshcd_get_uic_cmd_result - Get the UIC command result
866  * @hba: Pointer to adapter instance
867  *
868  * This function gets the result of UIC command completion
869  *
870  * Return: 0 on success; non-zero value on error.
871  */
872 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
873 {
874         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
875                MASK_UIC_COMMAND_RESULT;
876 }
877
878 /**
879  * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
880  * @hba: Pointer to adapter instance
881  *
882  * This function gets UIC command argument3
883  * This function reads UIC command argument 3 (the DME attribute value).
884  *
885  * Return: the attribute value read from UIC command argument 3.
886 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
887 {
888         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
889 }
890
891 /**
892  * ufshcd_get_req_rsp - returns the TR response transaction type
893  * @ucd_rsp_ptr: pointer to response UPIU
894  *
895  * Return: UPIU type.
896  */
897 static inline enum upiu_response_transaction
898 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
899 {
900         return ucd_rsp_ptr->header.transaction_code;
901 }
902
903 /**
904  * ufshcd_is_exception_event - Check if the device raised an exception event
905  * @ucd_rsp_ptr: pointer to response UPIU
906  *
907  * The function checks if the device raised an exception event indicated in
908  * the Device Information field of response UPIU.
909  *
910  * Return: true if exception is raised, false otherwise.
911  */
912 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
913 {
914         return ucd_rsp_ptr->header.device_information & 1;
915 }
916
917 /**
918  * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
919  * @hba: per adapter instance
920  */
921 static inline void
922 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
923 {
924         ufshcd_writel(hba, INT_AGGR_ENABLE |
925                       INT_AGGR_COUNTER_AND_TIMER_RESET,
926                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
927 }
928
929 /**
930  * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
931  * @hba: per adapter instance
932  * @cnt: Interrupt aggregation counter threshold
933  * @tmout: Interrupt aggregation timeout value
934  */
935 static inline void
936 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
937 {
938         ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
939                       INT_AGGR_COUNTER_THLD_VAL(cnt) |
940                       INT_AGGR_TIMEOUT_VAL(tmout),
941                       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
942 }
943
944 /**
945  * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
946  * @hba: per adapter instance
947  */
948 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
949 {
950         ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
951 }
952
953 /**
954  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
955  *                      When the run-stop registers are set to 1, the host
956  *                      controller may start processing requests.
957  * @hba: per adapter instance
958  */
959 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
960 {
961         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
962                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
963         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
964                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
965 }
966
967 /**
968  * ufshcd_hba_start - Start controller initialization sequence
969  * @hba: per adapter instance
970  */
971 static inline void ufshcd_hba_start(struct ufs_hba *hba)
972 {
973         u32 val = CONTROLLER_ENABLE;
974
975         if (ufshcd_crypto_enable(hba))
976                 val |= CRYPTO_GENERAL_ENABLE;
977
978         ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
979 }
980
981 /**
982  * ufshcd_is_hba_active - Get controller state
983  * @hba: per adapter instance
984  *
985  * Return: true if and only if the controller is active.
986  */
987 bool ufshcd_is_hba_active(struct ufs_hba *hba)
988 {
989         return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
990 }
991 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
992
993 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
994 {
995         /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
996         if (hba->ufs_version <= ufshci_version(1, 1))
997                 return UFS_UNIPRO_VER_1_41;
998         else
999                 return UFS_UNIPRO_VER_1_6;
1000 }
1001 EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
1002
1003 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
1004 {
1005         /*
1006          * If both host and device support UniPro ver1.6 or later, PA layer
1007          * parameters tuning happens during link startup itself.
1008          *
1009          * We can manually tune PA layer parameters if either host or device
1010          * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
1011          * logic simple, we will only do manual tuning if local unipro version
1012          * doesn't support ver1.6 or later.
1013          */
1014         return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
1015 }
1016
1017 /**
1018  * ufshcd_set_clk_freq - set UFS controller clock frequencies
1019  * @hba: per adapter instance
1020  * @scale_up: If true, set the max possible frequency; otherwise set the low frequency
1021  *
1022  * Return: 0 if successful; < 0 upon failure.
1023  */
1024 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1025 {
1026         int ret = 0;
1027         struct ufs_clk_info *clki;
1028         struct list_head *head = &hba->clk_list_head;
1029
1030         if (list_empty(head))
1031                 goto out;
1032
1033         list_for_each_entry(clki, head, list) {
1034                 if (!IS_ERR_OR_NULL(clki->clk)) {
1035                         if (scale_up && clki->max_freq) {
1036                                 if (clki->curr_freq == clki->max_freq)
1037                                         continue;
1038
1039                                 ret = clk_set_rate(clki->clk, clki->max_freq);
1040                                 if (ret) {
1041                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1042                                                 __func__, clki->name,
1043                                                 clki->max_freq, ret);
1044                                         break;
1045                                 }
1046                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1047                                                 "scaled up", clki->name,
1048                                                 clki->curr_freq,
1049                                                 clki->max_freq);
1050
1051                                 clki->curr_freq = clki->max_freq;
1052
1053                         } else if (!scale_up && clki->min_freq) {
1054                                 if (clki->curr_freq == clki->min_freq)
1055                                         continue;
1056
1057                                 ret = clk_set_rate(clki->clk, clki->min_freq);
1058                                 if (ret) {
1059                                         dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1060                                                 __func__, clki->name,
1061                                                 clki->min_freq, ret);
1062                                         break;
1063                                 }
1064                                 trace_ufshcd_clk_scaling(dev_name(hba->dev),
1065                                                 "scaled down", clki->name,
1066                                                 clki->curr_freq,
1067                                                 clki->min_freq);
1068                                 clki->curr_freq = clki->min_freq;
1069                         }
1070                 }
1071                 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1072                                 clki->name, clk_get_rate(clki->clk));
1073         }
1074
1075 out:
1076         return ret;
1077 }
1078
1079 int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
1080                            struct dev_pm_opp *opp, void *data,
1081                            bool scaling_down)
1082 {
1083         struct ufs_hba *hba = dev_get_drvdata(dev);
1084         struct list_head *head = &hba->clk_list_head;
1085         struct ufs_clk_info *clki;
1086         unsigned long freq;
1087         u8 idx = 0;
1088         int ret;
1089
1090         list_for_each_entry(clki, head, list) {
1091                 if (!IS_ERR_OR_NULL(clki->clk)) {
1092                         freq = dev_pm_opp_get_freq_indexed(opp, idx++);
1093
1094                         /* Do not set the rate for clocks whose frequency is 0 */
1095                         if (!freq)
1096                                 continue;
1097
1098                         ret = clk_set_rate(clki->clk, freq);
1099                         if (ret) {
1100                                 dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
1101                                         __func__, clki->name, freq, ret);
1102                                 return ret;
1103                         }
1104
1105                         trace_ufshcd_clk_scaling(dev_name(dev),
1106                                 (scaling_down ? "scaled down" : "scaled up"),
1107                                 clki->name, hba->clk_scaling.target_freq, freq);
1108                 }
1109         }
1110
1111         return 0;
1112 }
1113 EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
1114
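/* Find the OPP at or below @freq and make it the current operating point. */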
1115 static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
1116 {
1117         struct dev_pm_opp *opp;
1118         int ret;
1119
1120         opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
1121                                                  &freq, 0);
1122         if (IS_ERR(opp))
1123                 return PTR_ERR(opp);
1124
1125         ret = dev_pm_opp_set_opp(hba->dev, opp);
1126         dev_pm_opp_put(opp);
1127
1128         return ret;
1129 }
1130
1131 /**
1132  * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1133  * @hba: per adapter instance
1134  * @freq: frequency to scale
1135  * @scale_up: True if scaling up and false if scaling down
1136  *
1137  * Return: 0 if successful; < 0 upon failure.
1138  */
1139 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
1140                              bool scale_up)
1141 {
1142         int ret = 0;
1143         ktime_t start = ktime_get();
1144
1145         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1146         if (ret)
1147                 goto out;
1148
1149         if (hba->use_pm_opp)
1150                 ret = ufshcd_opp_set_rate(hba, freq);
1151         else
1152                 ret = ufshcd_set_clk_freq(hba, scale_up);
1153         if (ret)
1154                 goto out;
1155
1156         ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1157         if (ret) {
1158                 if (hba->use_pm_opp)
1159                         ufshcd_opp_set_rate(hba,
1160                                             hba->devfreq->previous_freq);
1161                 else
1162                         ufshcd_set_clk_freq(hba, !scale_up);
1163         }
1164
1165 out:
1166         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1167                         (scale_up ? "up" : "down"),
1168                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1169         return ret;
1170 }
1171
1172 /**
1173  * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1174  * @hba: per adapter instance
1175  * @freq: frequency to scale
1176  * @scale_up: True if scaling up and false if scaling down
1177  *
1178  * Return: true if scaling is required, false otherwise.
1179  */
1180 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1181                                                unsigned long freq, bool scale_up)
1182 {
1183         struct ufs_clk_info *clki;
1184         struct list_head *head = &hba->clk_list_head;
1185
1186         if (list_empty(head))
1187                 return false;
1188
1189         if (hba->use_pm_opp)
1190                 return freq != hba->clk_scaling.target_freq;
1191
1192         list_for_each_entry(clki, head, list) {
1193                 if (!IS_ERR_OR_NULL(clki->clk)) {
1194                         if (scale_up && clki->max_freq) {
1195                                 if (clki->curr_freq == clki->max_freq)
1196                                         continue;
1197                                 return true;
1198                         } else if (!scale_up && clki->min_freq) {
1199                                 if (clki->curr_freq == clki->min_freq)
1200                                         continue;
1201                                 return true;
1202                         }
1203                 }
1204         }
1205
1206         return false;
1207 }
1208
1209 /*
1210  * Determine the number of pending commands by counting the bits in the SCSI
1211  * device budget maps. This approach has been selected because a bit is set in
1212  * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1213  * flag. The host_self_blocked flag can be modified by calling
1214  * scsi_block_requests() or scsi_unblock_requests().
1215  */
1216 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1217 {
1218         const struct scsi_device *sdev;
1219         u32 pending = 0;
1220
1221         lockdep_assert_held(hba->host->host_lock);
1222         __shost_for_each_device(sdev, hba->host)
1223                 pending += sbitmap_weight(&sdev->budget_map);
1224
1225         return pending;
1226 }
1227
1228 /*
1229  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1230  * has expired.
1231  *
1232  * Return: 0 upon success; -EBUSY upon timeout.
1233  */
1234 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1235                                         u64 wait_timeout_us)
1236 {
1237         unsigned long flags;
1238         int ret = 0;
1239         u32 tm_doorbell;
1240         u32 tr_pending;
1241         bool timeout = false, do_last_check = false;
1242         ktime_t start;
1243
1244         ufshcd_hold(hba);
1245         spin_lock_irqsave(hba->host->host_lock, flags);
1246         /*
1247          * Wait for all the outstanding tasks/transfer requests.
1248          * Verify by checking the doorbell registers are clear.
1249          */
1250         start = ktime_get();
1251         do {
1252                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1253                         ret = -EBUSY;
1254                         goto out;
1255                 }
1256
1257                 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1258                 tr_pending = ufshcd_pending_cmds(hba);
1259                 if (!tm_doorbell && !tr_pending) {
1260                         timeout = false;
1261                         break;
1262                 } else if (do_last_check) {
1263                         break;
1264                 }
1265
1266                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1267                 io_schedule_timeout(msecs_to_jiffies(20));
1268                 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1269                     wait_timeout_us) {
1270                         timeout = true;
1271                         /*
1272                          * We might have been scheduled out for a long time, so
1273                          * check once more whether the doorbells have been
1274                          * cleared by now.
1275                          */
1276                         do_last_check = true;
1277                 }
1278                 spin_lock_irqsave(hba->host->host_lock, flags);
1279         } while (tm_doorbell || tr_pending);
1280
1281         if (timeout) {
1282                 dev_err(hba->dev,
1283                         "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1284                         __func__, tm_doorbell, tr_pending);
1285                 ret = -EBUSY;
1286         }
1287 out:
1288         spin_unlock_irqrestore(hba->host->host_lock, flags);
1289         ufshcd_release(hba);
1290         return ret;
1291 }
1292
1293 /**
1294  * ufshcd_scale_gear - scale up/down UFS gear
1295  * @hba: per adapter instance
1296  * @scale_up: True for scaling up gear and false for scaling down
1297  *
1298  * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1299  * non-zero for any other errors.
1300  */
1301 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1302 {
1303         int ret = 0;
1304         struct ufs_pa_layer_attr new_pwr_info;
1305
1306         if (scale_up) {
1307                 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1308                        sizeof(struct ufs_pa_layer_attr));
1309         } else {
1310                 memcpy(&new_pwr_info, &hba->pwr_info,
1311                        sizeof(struct ufs_pa_layer_attr));
1312
1313                 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1314                     hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1315                         /* save the current power mode */
1316                         memcpy(&hba->clk_scaling.saved_pwr_info,
1317                                 &hba->pwr_info,
1318                                 sizeof(struct ufs_pa_layer_attr));
1319
1320                         /* scale down gear */
1321                         new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1322                         new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1323                 }
1324         }
1325
1326         /* check whether the power mode needs to be changed */
1327         ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1328         if (ret)
1329                 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1330                         __func__, ret,
1331                         hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1332                         new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1333
1334         return ret;
1335 }
1336
1337 /*
1338  * Wait until all pending SCSI commands and TMFs have finished or the timeout
1339  * has expired.
1340  *
1341  * Return: 0 upon success; -EBUSY upon timeout.
1342  */
1343 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1344 {
1345         int ret = 0;
1346         /*
1347          * make sure that there are no outstanding requests when
1348          * clock scaling is in progress
1349          */
1350         ufshcd_scsi_block_requests(hba);
1351         mutex_lock(&hba->wb_mutex);
1352         down_write(&hba->clk_scaling_lock);
1353
1354         if (!hba->clk_scaling.is_allowed ||
1355             ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1356                 ret = -EBUSY;
1357                 up_write(&hba->clk_scaling_lock);
1358                 mutex_unlock(&hba->wb_mutex);
1359                 ufshcd_scsi_unblock_requests(hba);
1360                 goto out;
1361         }
1362
1363         /* let's not get into low power until clock scaling is completed */
1364         ufshcd_hold(hba);
1365
1366 out:
1367         return ret;
1368 }
1369
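/*
 * Undo ufshcd_clock_scaling_prepare(). Where the WB-on-scale-up capability
 * is set and scaling succeeded, Write Booster follows the scaling direction.
 */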
1370 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
1371 {
1372         up_write(&hba->clk_scaling_lock);
1373
1374         /* Enable Write Booster if we have scaled up else disable it */
1375         if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1376                 ufshcd_wb_toggle(hba, scale_up);
1377
1378         mutex_unlock(&hba->wb_mutex);
1379
1380         ufshcd_scsi_unblock_requests(hba);
1381         ufshcd_release(hba);
1382 }
1383
1384 /**
1385  * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1386  * @hba: per adapter instance
1387  * @freq: frequency to scale
1388  * @scale_up: True for scaling up and false for scaling down
1389  *
1390  * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1391  * for any other errors.
1392  */
1393 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
1394                                 bool scale_up)
1395 {
1396         int ret = 0;
1397
1398         ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1399         if (ret)
1400                 return ret;
1401
1402         /* scale down the gear before scaling down clocks */
1403         if (!scale_up) {
1404                 ret = ufshcd_scale_gear(hba, false);
1405                 if (ret)
1406                         goto out_unprepare;
1407         }
1408
1409         ret = ufshcd_scale_clks(hba, freq, scale_up);
1410         if (ret) {
1411                 if (!scale_up)
1412                         ufshcd_scale_gear(hba, true);
1413                 goto out_unprepare;
1414         }
1415
1416         /* scale up the gear after scaling up clocks */
1417         if (scale_up) {
1418                 ret = ufshcd_scale_gear(hba, true);
1419                 if (ret) {
1420                         ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
1421                                           false);
1422                         goto out_unprepare;
1423                 }
1424         }
1425
1426 out_unprepare:
1427         ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
1428         return ret;
1429 }
1430
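/*
 * Workqueue handlers that suspend or resume devfreq-driven clock scaling;
 * clk_scaling.is_suspended is updated under the host lock so the two work
 * items do not race with each other.
 */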
1431 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1432 {
1433         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1434                                            clk_scaling.suspend_work);
1435         unsigned long irq_flags;
1436
1437         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1438         if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1439                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1440                 return;
1441         }
1442         hba->clk_scaling.is_suspended = true;
1443         hba->clk_scaling.window_start_t = 0;
1444         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1445
1446         devfreq_suspend_device(hba->devfreq);
1447 }
1448
1449 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1450 {
1451         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1452                                            clk_scaling.resume_work);
1453         unsigned long irq_flags;
1454
1455         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1456         if (!hba->clk_scaling.is_suspended) {
1457                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1458                 return;
1459         }
1460         hba->clk_scaling.is_suspended = false;
1461         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1462
1463         devfreq_resume_device(hba->devfreq);
1464 }
1465
1466 static int ufshcd_devfreq_target(struct device *dev,
1467                                 unsigned long *freq, u32 flags)
1468 {
1469         int ret = 0;
1470         struct ufs_hba *hba = dev_get_drvdata(dev);
1471         ktime_t start;
1472         bool scale_up = false, sched_clk_scaling_suspend_work = false;
1473         struct list_head *clk_list = &hba->clk_list_head;
1474         struct ufs_clk_info *clki;
1475         unsigned long irq_flags;
1476
1477         if (!ufshcd_is_clkscaling_supported(hba))
1478                 return -EINVAL;
1479
1480         if (hba->use_pm_opp) {
1481                 struct dev_pm_opp *opp;
1482
1483                 /* Get the recommended frequency from OPP framework */
1484                 opp = devfreq_recommended_opp(dev, freq, flags);
1485                 if (IS_ERR(opp))
1486                         return PTR_ERR(opp);
1487
1488                 dev_pm_opp_put(opp);
1489         } else {
1490                 /* Override with the closest supported frequency */
1491                 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
1492                                         list);
1493                 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1494         }
1495
1496         spin_lock_irqsave(hba->host->host_lock, irq_flags);
1497         if (ufshcd_eh_in_progress(hba)) {
1498                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1499                 return 0;
1500         }
1501
1502         /* Skip scaling clock when clock scaling is suspended */
1503         if (hba->clk_scaling.is_suspended) {
1504                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1505                 dev_warn(hba->dev, "clock scaling is suspended, skip");
1506                 return 0;
1507         }
1508
1509         if (!hba->clk_scaling.active_reqs)
1510                 sched_clk_scaling_suspend_work = true;
1511
1512         if (list_empty(clk_list)) {
1513                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1514                 goto out;
1515         }
1516
1517         /* Decide based on the target or rounded-off frequency and update */
1518         if (hba->use_pm_opp)
1519                 scale_up = *freq > hba->clk_scaling.target_freq;
1520         else
1521                 scale_up = *freq == clki->max_freq;
1522
1523         if (!hba->use_pm_opp && !scale_up)
1524                 *freq = clki->min_freq;
1525
1526         /* Update the frequency */
1527         if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
1528                 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1529                 ret = 0;
1530                 goto out; /* no state change required */
1531         }
1532         spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1533
1534         start = ktime_get();
1535         ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
1536         if (!ret)
1537                 hba->clk_scaling.target_freq = *freq;
1538
1539         trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1540                 (scale_up ? "up" : "down"),
1541                 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1542
1543 out:
1544         if (sched_clk_scaling_suspend_work && !scale_up)
1545                 queue_work(hba->clk_scaling.workq,
1546                            &hba->clk_scaling.suspend_work);
1547
1548         return ret;
1549 }
1550
1551 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1552                 struct devfreq_dev_status *stat)
1553 {
1554         struct ufs_hba *hba = dev_get_drvdata(dev);
1555         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1556         unsigned long flags;
1557         ktime_t curr_t;
1558
1559         if (!ufshcd_is_clkscaling_supported(hba))
1560                 return -EINVAL;
1561
1562         memset(stat, 0, sizeof(*stat));
1563
1564         spin_lock_irqsave(hba->host->host_lock, flags);
1565         curr_t = ktime_get();
1566         if (!scaling->window_start_t)
1567                 goto start_window;
1568
1569         /*
1570          * If the current frequency is 0, the ondemand governor assumes
1571          * that no initial frequency has been set and always requests
1572          * the maximum frequency.
1573          */
1574         if (hba->use_pm_opp) {
1575                 stat->current_frequency = hba->clk_scaling.target_freq;
1576         } else {
1577                 struct list_head *clk_list = &hba->clk_list_head;
1578                 struct ufs_clk_info *clki;
1579
1580                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1581                 stat->current_frequency = clki->curr_freq;
1582         }
1583
1584         if (scaling->is_busy_started)
1585                 scaling->tot_busy_t += ktime_us_delta(curr_t,
1586                                 scaling->busy_start_t);
1587         stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1588         stat->busy_time = scaling->tot_busy_t;
1589 start_window:
1590         scaling->window_start_t = curr_t;
1591         scaling->tot_busy_t = 0;
1592
1593         if (scaling->active_reqs) {
1594                 scaling->busy_start_t = curr_t;
1595                 scaling->is_busy_started = true;
1596         } else {
1597                 scaling->busy_start_t = 0;
1598                 scaling->is_busy_started = false;
1599         }
1600         spin_unlock_irqrestore(hba->host->host_lock, flags);
1601         return 0;
1602 }
1603
1604 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1605 {
1606         struct list_head *clk_list = &hba->clk_list_head;
1607         struct ufs_clk_info *clki;
1608         struct devfreq *devfreq;
1609         int ret;
1610
1611         /* Skip devfreq if we don't have any clocks in the list */
1612         if (list_empty(clk_list))
1613                 return 0;
1614
1615         if (!hba->use_pm_opp) {
1616                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1617                 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1618                 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1619         }
1620
1621         ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1622                                          &hba->vps->ondemand_data);
1623         devfreq = devfreq_add_device(hba->dev,
1624                         &hba->vps->devfreq_profile,
1625                         DEVFREQ_GOV_SIMPLE_ONDEMAND,
1626                         &hba->vps->ondemand_data);
1627         if (IS_ERR(devfreq)) {
1628                 ret = PTR_ERR(devfreq);
1629                 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1630
1631                 if (!hba->use_pm_opp) {
1632                         dev_pm_opp_remove(hba->dev, clki->min_freq);
1633                         dev_pm_opp_remove(hba->dev, clki->max_freq);
1634                 }
1635                 return ret;
1636         }
1637
1638         hba->devfreq = devfreq;
1639
1640         return 0;
1641 }
1642
1643 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1644 {
1645         struct list_head *clk_list = &hba->clk_list_head;
1646
1647         if (!hba->devfreq)
1648                 return;
1649
1650         devfreq_remove_device(hba->devfreq);
1651         hba->devfreq = NULL;
1652
1653         if (!hba->use_pm_opp) {
1654                 struct ufs_clk_info *clki;
1655
1656                 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1657                 dev_pm_opp_remove(hba->dev, clki->min_freq);
1658                 dev_pm_opp_remove(hba->dev, clki->max_freq);
1659         }
1660 }
1661
1662 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1663 {
1664         unsigned long flags;
1665         bool suspend = false;
1666
1667         cancel_work_sync(&hba->clk_scaling.suspend_work);
1668         cancel_work_sync(&hba->clk_scaling.resume_work);
1669
1670         spin_lock_irqsave(hba->host->host_lock, flags);
1671         if (!hba->clk_scaling.is_suspended) {
1672                 suspend = true;
1673                 hba->clk_scaling.is_suspended = true;
1674                 hba->clk_scaling.window_start_t = 0;
1675         }
1676         spin_unlock_irqrestore(hba->host->host_lock, flags);
1677
1678         if (suspend)
1679                 devfreq_suspend_device(hba->devfreq);
1680 }
1681
1682 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1683 {
1684         unsigned long flags;
1685         bool resume = false;
1686
1687         spin_lock_irqsave(hba->host->host_lock, flags);
1688         if (hba->clk_scaling.is_suspended) {
1689                 resume = true;
1690                 hba->clk_scaling.is_suspended = false;
1691         }
1692         spin_unlock_irqrestore(hba->host->host_lock, flags);
1693
1694         if (resume)
1695                 devfreq_resume_device(hba->devfreq);
1696 }
1697
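/*
 * The show/store pair below backs the "clkscale_enable" device attribute
 * created in ufshcd_init_clk_scaling_sysfs(). Illustrative usage only; the
 * sysfs path depends on the platform and is an assumption here:
 *
 *	echo 0 > /sys/devices/.../clkscale_enable	# disable scaling, pin clocks at max
 *	echo 1 > /sys/devices/.../clkscale_enable	# re-enable devfreq scaling
 */
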
1698 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1699                 struct device_attribute *attr, char *buf)
1700 {
1701         struct ufs_hba *hba = dev_get_drvdata(dev);
1702
1703         return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1704 }
1705
1706 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1707                 struct device_attribute *attr, const char *buf, size_t count)
1708 {
1709         struct ufs_hba *hba = dev_get_drvdata(dev);
1710         u32 value;
1711         int err = 0;
1712
1713         if (kstrtou32(buf, 0, &value))
1714                 return -EINVAL;
1715
1716         down(&hba->host_sem);
1717         if (!ufshcd_is_user_access_allowed(hba)) {
1718                 err = -EBUSY;
1719                 goto out;
1720         }
1721
1722         value = !!value;
1723         if (value == hba->clk_scaling.is_enabled)
1724                 goto out;
1725
1726         ufshcd_rpm_get_sync(hba);
1727         ufshcd_hold(hba);
1728
1729         hba->clk_scaling.is_enabled = value;
1730
1731         if (value) {
1732                 ufshcd_resume_clkscaling(hba);
1733         } else {
1734                 ufshcd_suspend_clkscaling(hba);
1735                 err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
1736                 if (err)
1737                         dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1738                                         __func__, err);
1739         }
1740
1741         ufshcd_release(hba);
1742         ufshcd_rpm_put_sync(hba);
1743 out:
1744         up(&hba->host_sem);
1745         return err ? err : count;
1746 }
1747
1748 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1749 {
1750         hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1751         hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1752         sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1753         hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1754         hba->clk_scaling.enable_attr.attr.mode = 0644;
1755         if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1756                 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1757 }
1758
1759 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1760 {
1761         if (hba->clk_scaling.enable_attr.attr.name)
1762                 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1763 }
1764
1765 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1766 {
1767         char wq_name[sizeof("ufs_clkscaling_00")];
1768
1769         if (!ufshcd_is_clkscaling_supported(hba))
1770                 return;
1771
1772         if (!hba->clk_scaling.min_gear)
1773                 hba->clk_scaling.min_gear = UFS_HS_G1;
1774
1775         INIT_WORK(&hba->clk_scaling.suspend_work,
1776                   ufshcd_clk_scaling_suspend_work);
1777         INIT_WORK(&hba->clk_scaling.resume_work,
1778                   ufshcd_clk_scaling_resume_work);
1779
1780         snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1781                  hba->host->host_no);
1782         hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1783
1784         hba->clk_scaling.is_initialized = true;
1785 }
1786
1787 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1788 {
1789         if (!hba->clk_scaling.is_initialized)
1790                 return;
1791
1792         ufshcd_remove_clk_scaling_sysfs(hba);
1793         destroy_workqueue(hba->clk_scaling.workq);
1794         ufshcd_devfreq_remove(hba);
1795         hba->clk_scaling.is_initialized = false;
1796 }
1797
1798 static void ufshcd_ungate_work(struct work_struct *work)
1799 {
1800         int ret;
1801         unsigned long flags;
1802         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1803                         clk_gating.ungate_work);
1804
1805         cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1806
1807         spin_lock_irqsave(hba->host->host_lock, flags);
1808         if (hba->clk_gating.state == CLKS_ON) {
1809                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1810                 return;
1811         }
1812
1813         spin_unlock_irqrestore(hba->host->host_lock, flags);
1814         ufshcd_hba_vreg_set_hpm(hba);
1815         ufshcd_setup_clocks(hba, true);
1816
1817         ufshcd_enable_irq(hba);
1818
1819         /* Exit from hibern8 */
1820         if (ufshcd_can_hibern8_during_gating(hba)) {
1821                 /* Prevent gating in this path */
1822                 hba->clk_gating.is_suspended = true;
1823                 if (ufshcd_is_link_hibern8(hba)) {
1824                         ret = ufshcd_uic_hibern8_exit(hba);
1825                         if (ret)
1826                                 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1827                                         __func__, ret);
1828                         else
1829                                 ufshcd_set_link_active(hba);
1830                 }
1831                 hba->clk_gating.is_suspended = false;
1832         }
1833 }
1834
1835 /**
1836  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1837  * Also, exit from hibern8 mode and set the link as active.
1838  * @hba: per adapter instance
1839  */
1840 void ufshcd_hold(struct ufs_hba *hba)
1841 {
1842         bool flush_result;
1843         unsigned long flags;
1844
1845         if (!ufshcd_is_clkgating_allowed(hba) ||
1846             !hba->clk_gating.is_initialized)
1847                 return;
1848         spin_lock_irqsave(hba->host->host_lock, flags);
1849         hba->clk_gating.active_reqs++;
1850
1851 start:
1852         switch (hba->clk_gating.state) {
1853         case CLKS_ON:
1854                 /*
1855                  * Wait for the ungate work to complete if in progress.
1856                  * Though the clocks may be in ON state, the link could
1857          * still be in hibern8 state if hibern8 is allowed
1858          * during clock gating.
1859          * Make sure we also exit hibern8, in addition to the
1860          * clocks being ON.
1861                  */
1862                 if (ufshcd_can_hibern8_during_gating(hba) &&
1863                     ufshcd_is_link_hibern8(hba)) {
1864                         spin_unlock_irqrestore(hba->host->host_lock, flags);
1865                         flush_result = flush_work(&hba->clk_gating.ungate_work);
1866                         if (hba->clk_gating.is_suspended && !flush_result)
1867                                 return;
1868                         spin_lock_irqsave(hba->host->host_lock, flags);
1869                         goto start;
1870                 }
1871                 break;
1872         case REQ_CLKS_OFF:
1873                 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1874                         hba->clk_gating.state = CLKS_ON;
1875                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1876                                                 hba->clk_gating.state);
1877                         break;
1878                 }
1879                 /*
1880                  * If we are here, it means gating work is either done or
1881                  * currently running. Hence, fall through to cancel gating
1882                  * work and to enable clocks.
1883                  */
1884                 fallthrough;
1885         case CLKS_OFF:
1886                 hba->clk_gating.state = REQ_CLKS_ON;
1887                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1888                                         hba->clk_gating.state);
1889                 queue_work(hba->clk_gating.clk_gating_workq,
1890                            &hba->clk_gating.ungate_work);
1891                 /*
1892                  * fall through to check if we should wait for this
1893                  * work to be done or not.
1894                  */
1895                 fallthrough;
1896         case REQ_CLKS_ON:
1897                 spin_unlock_irqrestore(hba->host->host_lock, flags);
1898                 flush_work(&hba->clk_gating.ungate_work);
1899                 /* Make sure state is CLKS_ON before returning */
1900                 spin_lock_irqsave(hba->host->host_lock, flags);
1901                 goto start;
1902         default:
1903                 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1904                                 __func__, hba->clk_gating.state);
1905                 break;
1906         }
1907         spin_unlock_irqrestore(hba->host->host_lock, flags);
1908 }
1909 EXPORT_SYMBOL_GPL(ufshcd_hold);
1910
1911 static void ufshcd_gate_work(struct work_struct *work)
1912 {
1913         struct ufs_hba *hba = container_of(work, struct ufs_hba,
1914                         clk_gating.gate_work.work);
1915         unsigned long flags;
1916         int ret;
1917
1918         spin_lock_irqsave(hba->host->host_lock, flags);
1919         /*
1920          * If this work was scheduled for cancellation, the gating state
1921          * would already be marked as REQ_CLKS_ON. In that case save time
1922          * by skipping the gating work and exit after changing the clock
1923          * state to CLKS_ON.
1924          */
1925         if (hba->clk_gating.is_suspended ||
1926                 (hba->clk_gating.state != REQ_CLKS_OFF)) {
1927                 hba->clk_gating.state = CLKS_ON;
1928                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1929                                         hba->clk_gating.state);
1930                 goto rel_lock;
1931         }
1932
1933         if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1934                 goto rel_lock;
1935
1936         spin_unlock_irqrestore(hba->host->host_lock, flags);
1937
1938         /* put the link into hibern8 mode before turning off clocks */
1939         if (ufshcd_can_hibern8_during_gating(hba)) {
1940                 ret = ufshcd_uic_hibern8_enter(hba);
1941                 if (ret) {
1942                         hba->clk_gating.state = CLKS_ON;
1943                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1944                                         __func__, ret);
1945                         trace_ufshcd_clk_gating(dev_name(hba->dev),
1946                                                 hba->clk_gating.state);
1947                         goto out;
1948                 }
1949                 ufshcd_set_link_hibern8(hba);
1950         }
1951
1952         ufshcd_disable_irq(hba);
1953
1954         ufshcd_setup_clocks(hba, false);
1955
1956         /* Put the host controller in low power mode if possible */
1957         ufshcd_hba_vreg_set_lpm(hba);
1958         /*
1959          * If this work was scheduled for cancellation, the gating state
1960          * would be marked as REQ_CLKS_ON. In that case keep the state as
1961          * REQ_CLKS_ON, which anyway implies that clocks are off and a
1962          * request to turn them on is pending. This keeps the state
1963          * machine intact and ultimately prevents the cancel work from
1964          * running multiple times when new requests arrive before the
1965          * current cancel work is done.
1966          */
1967         spin_lock_irqsave(hba->host->host_lock, flags);
1968         if (hba->clk_gating.state == REQ_CLKS_OFF) {
1969                 hba->clk_gating.state = CLKS_OFF;
1970                 trace_ufshcd_clk_gating(dev_name(hba->dev),
1971                                         hba->clk_gating.state);
1972         }
1973 rel_lock:
1974         spin_unlock_irqrestore(hba->host->host_lock, flags);
1975 out:
1976         return;
1977 }
1978
1979 /* host lock must be held before calling this variant */
1980 static void __ufshcd_release(struct ufs_hba *hba)
1981 {
1982         if (!ufshcd_is_clkgating_allowed(hba))
1983                 return;
1984
1985         hba->clk_gating.active_reqs--;
1986
1987         if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1988             hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1989             hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
1990             hba->active_uic_cmd || hba->uic_async_done ||
1991             hba->clk_gating.state == CLKS_OFF)
1992                 return;
1993
1994         hba->clk_gating.state = REQ_CLKS_OFF;
1995         trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1996         queue_delayed_work(hba->clk_gating.clk_gating_workq,
1997                            &hba->clk_gating.gate_work,
1998                            msecs_to_jiffies(hba->clk_gating.delay_ms));
1999 }
2000
2001 void ufshcd_release(struct ufs_hba *hba)
2002 {
2003         unsigned long flags;
2004
2005         spin_lock_irqsave(hba->host->host_lock, flags);
2006         __ufshcd_release(hba);
2007         spin_unlock_irqrestore(hba->host->host_lock, flags);
2008 }
2009 EXPORT_SYMBOL_GPL(ufshcd_release);
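
/*
 * Typical usage sketch for the pair above (illustrative only): bracket host
 * register or link accesses with ufshcd_hold()/ufshcd_release() so that the
 * gate work cannot turn the clocks off underneath the access. ufshcd_hold()
 * ungates the clocks (and exits hibern8 if needed); ufshcd_release() allows
 * gating again after clk_gating.delay_ms of idleness:
 *
 *	ufshcd_hold(hba);
 *	... touch host registers / issue UIC or device commands ...
 *	ufshcd_release(hba);
 */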
2010
2011 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
2012                 struct device_attribute *attr, char *buf)
2013 {
2014         struct ufs_hba *hba = dev_get_drvdata(dev);
2015
2016         return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
2017 }
2018
2019 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
2020 {
2021         struct ufs_hba *hba = dev_get_drvdata(dev);
2022         unsigned long flags;
2023
2024         spin_lock_irqsave(hba->host->host_lock, flags);
2025         hba->clk_gating.delay_ms = value;
2026         spin_unlock_irqrestore(hba->host->host_lock, flags);
2027 }
2028 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
2029
2030 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
2031                 struct device_attribute *attr, const char *buf, size_t count)
2032 {
2033         unsigned long value;
2034
2035         if (kstrtoul(buf, 0, &value))
2036                 return -EINVAL;
2037
2038         ufshcd_clkgate_delay_set(dev, value);
2039         return count;
2040 }
2041
2042 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
2043                 struct device_attribute *attr, char *buf)
2044 {
2045         struct ufs_hba *hba = dev_get_drvdata(dev);
2046
2047         return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
2048 }
2049
2050 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
2051                 struct device_attribute *attr, const char *buf, size_t count)
2052 {
2053         struct ufs_hba *hba = dev_get_drvdata(dev);
2054         unsigned long flags;
2055         u32 value;
2056
2057         if (kstrtou32(buf, 0, &value))
2058                 return -EINVAL;
2059
2060         value = !!value;
2061
2062         spin_lock_irqsave(hba->host->host_lock, flags);
2063         if (value == hba->clk_gating.is_enabled)
2064                 goto out;
2065
2066         if (value)
2067                 __ufshcd_release(hba);
2068         else
2069                 hba->clk_gating.active_reqs++;
2070
2071         hba->clk_gating.is_enabled = value;
2072 out:
2073         spin_unlock_irqrestore(hba->host->host_lock, flags);
2074         return count;
2075 }
2076
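/*
 * The attributes registered below expose the clock gating controls in sysfs.
 * Illustrative usage only; the paths are platform dependent and are an
 * assumption here:
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms	# gate 200 ms after idle
 *	echo 0   > /sys/devices/.../clkgate_enable	# keep clocks ungated
 */
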
2077 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
2078 {
2079         hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2080         hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2081         sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2082         hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2083         hba->clk_gating.delay_attr.attr.mode = 0644;
2084         if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2085                 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
2086
2087         hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2088         hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2089         sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2090         hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2091         hba->clk_gating.enable_attr.attr.mode = 0644;
2092         if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2093                 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
2094 }
2095
2096 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2097 {
2098         if (hba->clk_gating.delay_attr.attr.name)
2099                 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2100         if (hba->clk_gating.enable_attr.attr.name)
2101                 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2102 }
2103
2104 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2105 {
2106         char wq_name[sizeof("ufs_clk_gating_00")];
2107
2108         if (!ufshcd_is_clkgating_allowed(hba))
2109                 return;
2110
2111         hba->clk_gating.state = CLKS_ON;
2112
2113         hba->clk_gating.delay_ms = 150;
2114         INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2115         INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2116
2117         snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
2118                  hba->host->host_no);
2119         hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
2120                                         WQ_MEM_RECLAIM | WQ_HIGHPRI);
2121
2122         ufshcd_init_clk_gating_sysfs(hba);
2123
2124         hba->clk_gating.is_enabled = true;
2125         hba->clk_gating.is_initialized = true;
2126 }
2127
2128 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2129 {
2130         if (!hba->clk_gating.is_initialized)
2131                 return;
2132
2133         ufshcd_remove_clk_gating_sysfs(hba);
2134
2135         /* Ungate the clock if necessary. */
2136         ufshcd_hold(hba);
2137         hba->clk_gating.is_initialized = false;
2138         ufshcd_release(hba);
2139
2140         destroy_workqueue(hba->clk_gating.clk_gating_workq);
2141 }
2142
2143 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2144 {
2145         bool queue_resume_work = false;
2146         ktime_t curr_t = ktime_get();
2147         unsigned long flags;
2148
2149         if (!ufshcd_is_clkscaling_supported(hba))
2150                 return;
2151
2152         spin_lock_irqsave(hba->host->host_lock, flags);
2153         if (!hba->clk_scaling.active_reqs++)
2154                 queue_resume_work = true;
2155
2156         if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
2157                 spin_unlock_irqrestore(hba->host->host_lock, flags);
2158                 return;
2159         }
2160
2161         if (queue_resume_work)
2162                 queue_work(hba->clk_scaling.workq,
2163                            &hba->clk_scaling.resume_work);
2164
2165         if (!hba->clk_scaling.window_start_t) {
2166                 hba->clk_scaling.window_start_t = curr_t;
2167                 hba->clk_scaling.tot_busy_t = 0;
2168                 hba->clk_scaling.is_busy_started = false;
2169         }
2170
2171         if (!hba->clk_scaling.is_busy_started) {
2172                 hba->clk_scaling.busy_start_t = curr_t;
2173                 hba->clk_scaling.is_busy_started = true;
2174         }
2175         spin_unlock_irqrestore(hba->host->host_lock, flags);
2176 }
2177
2178 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2179 {
2180         struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2181         unsigned long flags;
2182
2183         if (!ufshcd_is_clkscaling_supported(hba))
2184                 return;
2185
2186         spin_lock_irqsave(hba->host->host_lock, flags);
2187         hba->clk_scaling.active_reqs--;
2188         if (!scaling->active_reqs && scaling->is_busy_started) {
2189                 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2190                                         scaling->busy_start_t));
2191                 scaling->busy_start_t = 0;
2192                 scaling->is_busy_started = false;
2193         }
2194         spin_unlock_irqrestore(hba->host->host_lock, flags);
2195 }
2196
2197 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2198 {
2199         if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2200                 return READ;
2201         else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2202                 return WRITE;
2203         else
2204                 return -EINVAL;
2205 }
2206
2207 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2208                                                 struct ufshcd_lrb *lrbp)
2209 {
2210         const struct ufs_hba_monitor *m = &hba->monitor;
2211
2212         return (m->enabled && lrbp && lrbp->cmd &&
2213                 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2214                 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2215 }
2216
2217 static void ufshcd_start_monitor(struct ufs_hba *hba,
2218                                  const struct ufshcd_lrb *lrbp)
2219 {
2220         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2221         unsigned long flags;
2222
2223         spin_lock_irqsave(hba->host->host_lock, flags);
2224         if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2225                 hba->monitor.busy_start_ts[dir] = ktime_get();
2226         spin_unlock_irqrestore(hba->host->host_lock, flags);
2227 }
2228
2229 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2230 {
2231         int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2232         unsigned long flags;
2233
2234         spin_lock_irqsave(hba->host->host_lock, flags);
2235         if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2236                 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2237                 struct ufs_hba_monitor *m = &hba->monitor;
2238                 ktime_t now, inc, lat;
2239
2240                 now = lrbp->compl_time_stamp;
2241                 inc = ktime_sub(now, m->busy_start_ts[dir]);
2242                 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2243                 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2244
2245                 /* Update latencies */
2246                 m->nr_req[dir]++;
2247                 lat = ktime_sub(now, lrbp->issue_time_stamp);
2248                 m->lat_sum[dir] += lat;
2249                 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2250                         m->lat_max[dir] = lat;
2251                 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2252                         m->lat_min[dir] = lat;
2253
2254                 m->nr_queued[dir]--;
2255                 /* Push forward the monitor's busy start time */
2256                 m->busy_start_ts[dir] = now;
2257         }
2258         spin_unlock_irqrestore(hba->host->host_lock, flags);
2259 }
2260
2261 /**
2262  * ufshcd_send_command - Send SCSI or device management commands
2263  * @hba: per adapter instance
2264  * @task_tag: Task tag of the command
2265  * @hwq: pointer to hardware queue instance
2266  */
2267 static inline
2268 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2269                          struct ufs_hw_queue *hwq)
2270 {
2271         struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2272         unsigned long flags;
2273
2274         lrbp->issue_time_stamp = ktime_get();
2275         lrbp->issue_time_stamp_local_clock = local_clock();
2276         lrbp->compl_time_stamp = ktime_set(0, 0);
2277         lrbp->compl_time_stamp_local_clock = 0;
2278         ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2279         if (lrbp->cmd)
2280                 ufshcd_clk_scaling_start_busy(hba);
2281         if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2282                 ufshcd_start_monitor(hba, lrbp);
2283
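        /*
         * Two submission paths: with MCQ, copy the UTP transfer request
         * descriptor into the hardware queue's submission queue and advance
         * the SQ tail; otherwise mark the tag outstanding and ring the legacy
         * doorbell register.
         */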
2284         if (is_mcq_enabled(hba)) {
2285                 int utrd_size = sizeof(struct utp_transfer_req_desc);
2286                 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2287                 struct utp_transfer_req_desc *dest;
2288
2289                 spin_lock(&hwq->sq_lock);
2290                 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2291                 memcpy(dest, src, utrd_size);
2292                 ufshcd_inc_sq_tail(hwq);
2293                 spin_unlock(&hwq->sq_lock);
2294         } else {
2295                 spin_lock_irqsave(&hba->outstanding_lock, flags);
2296                 if (hba->vops && hba->vops->setup_xfer_req)
2297                         hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2298                                                   !!lrbp->cmd);
2299                 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2300                 ufshcd_writel(hba, 1 << lrbp->task_tag,
2301                               REG_UTP_TRANSFER_REQ_DOOR_BELL);
2302                 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2303         }
2304 }
2305
2306 /**
2307  * ufshcd_copy_sense_data - Copy sense data in case of check condition
2308  * @lrbp: pointer to local reference block
2309  */
2310 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2311 {
2312         u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2313         u16 resp_len;
2314         int len;
2315
2316         resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2317         if (sense_buffer && resp_len) {
2318                 int len_to_copy;
2319
2320                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2321                 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2322
2323                 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2324                        len_to_copy);
2325         }
2326 }
2327
2328 /**
2329  * ufshcd_copy_query_response() - Copy the Query Response and the data
2330  * descriptor
2331  * @hba: per adapter instance
2332  * @lrbp: pointer to local reference block
2333  *
2334  * Return: 0 upon success; < 0 upon failure.
2335  */
2336 static
2337 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2338 {
2339         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2340
2341         memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2342
2343         /* Get the descriptor */
2344         if (hba->dev_cmd.query.descriptor &&
2345             lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2346                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2347                                 GENERAL_UPIU_REQUEST_SIZE;
2348                 u16 resp_len;
2349                 u16 buf_len;
2350
2351                 /* data segment length */
2352                 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2353                                        .data_segment_length);
2354                 buf_len = be16_to_cpu(
2355                                 hba->dev_cmd.query.request.upiu_req.length);
2356                 if (likely(buf_len >= resp_len)) {
2357                         memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2358                 } else {
2359                         dev_warn(hba->dev,
2360                                  "%s: rsp size %d is bigger than buffer size %d",
2361                                  __func__, resp_len, buf_len);
2362                         return -EINVAL;
2363                 }
2364         }
2365
2366         return 0;
2367 }
2368
2369 /**
2370  * ufshcd_hba_capabilities - Read controller capabilities
2371  * @hba: per adapter instance
2372  *
2373  * Return: 0 on success, negative on error.
2374  */
2375 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2376 {
2377         int err;
2378
2379         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2380         if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
2381                 hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
2382
2383         /* nutrs and nutmrs are 0 based values */
2384         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
2385         hba->nutmrs =
2386         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2387         hba->reserved_slot = hba->nutrs - 1;
2388
2389         /* Read crypto capabilities */
2390         err = ufshcd_hba_init_crypto_capabilities(hba);
2391         if (err) {
2392                 dev_err(hba->dev, "crypto setup failed\n");
2393                 return err;
2394         }
2395
2396         hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2397         if (!hba->mcq_sup)
2398                 return 0;
2399
2400         hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2401         hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
2402                                      hba->mcq_capabilities);
2403
2404         return 0;
2405 }
2406
2407 /**
2408  * ufshcd_ready_for_uic_cmd - Check if controller is ready
2409  *                            to accept UIC commands
2410  * @hba: per adapter instance
2411  *
2412  * Return: true if the controller is ready to accept UIC commands, else false.
2413  */
2414 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2415 {
2416         u32 val;
2417         int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2418                                     500, UIC_CMD_TIMEOUT * 1000, false, hba,
2419                                     REG_CONTROLLER_STATUS);
2420         return ret == 0;
2421 }
2422
2423 /**
2424  * ufshcd_get_upmcrs - Get the power mode change request status
2425  * @hba: Pointer to adapter instance
2426  *
2427  * This function gets the UPMCRS field of the HCS register
2428  *
2429  * Return: value of UPMCRS field.
2430  */
2431 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2432 {
2433         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2434 }
2435
2436 /**
2437  * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
2438  * @hba: per adapter instance
2439  * @uic_cmd: UIC command
2440  */
2441 static inline void
2442 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2443 {
2444         lockdep_assert_held(&hba->uic_cmd_mutex);
2445
2446         WARN_ON(hba->active_uic_cmd);
2447
2448         hba->active_uic_cmd = uic_cmd;
2449
2450         /* Write Args */
2451         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2452         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2453         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2454
2455         ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2456
2457         /* Write UIC Cmd */
2458         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2459                       REG_UIC_COMMAND);
2460 }
2461
2462 /**
2463  * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
2464  * @hba: per adapter instance
2465  * @uic_cmd: UIC command
2466  *
2467  * Return: 0 only if success.
2468  */
2469 static int
2470 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2471 {
2472         int ret;
2473         unsigned long flags;
2474
2475         lockdep_assert_held(&hba->uic_cmd_mutex);
2476
2477         if (wait_for_completion_timeout(&uic_cmd->done,
2478                                         msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
2479                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2480         } else {
2481                 ret = -ETIMEDOUT;
2482                 dev_err(hba->dev,
2483                         "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2484                         uic_cmd->command, uic_cmd->argument3);
2485
2486                 if (!uic_cmd->cmd_active) {
2487                         dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2488                                 __func__);
2489                         ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2490                 }
2491         }
2492
2493         spin_lock_irqsave(hba->host->host_lock, flags);
2494         hba->active_uic_cmd = NULL;
2495         spin_unlock_irqrestore(hba->host->host_lock, flags);
2496
2497         return ret;
2498 }
2499
2500 /**
2501  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2502  * @hba: per adapter instance
2503  * @uic_cmd: UIC command
2504  * @completion: initialize the completion only if this is set to true
2505  *
2506  * Return: 0 only if success.
2507  */
2508 static int
2509 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2510                       bool completion)
2511 {
2512         lockdep_assert_held(&hba->uic_cmd_mutex);
2513
2514         if (!ufshcd_ready_for_uic_cmd(hba)) {
2515                 dev_err(hba->dev,
2516                         "Controller not ready to accept UIC commands\n");
2517                 return -EIO;
2518         }
2519
2520         if (completion)
2521                 init_completion(&uic_cmd->done);
2522
2523         uic_cmd->cmd_active = 1;
2524         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2525
2526         return 0;
2527 }
2528
2529 /**
2530  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2531  * @hba: per adapter instance
2532  * @uic_cmd: UIC command
2533  *
2534  * Return: 0 only if success.
2535  */
2536 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2537 {
2538         int ret;
2539
2540         if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2541                 return 0;
2542
2543         ufshcd_hold(hba);
2544         mutex_lock(&hba->uic_cmd_mutex);
2545         ufshcd_add_delay_before_dme_cmd(hba);
2546
2547         ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2548         if (!ret)
2549                 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2550
2551         mutex_unlock(&hba->uic_cmd_mutex);
2552
2553         ufshcd_release(hba);
2554         return ret;
2555 }
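
/*
 * Usage sketch for ufshcd_send_uic_cmd() (illustrative; the DME opcode and
 * argument macros are assumed from the UFS headers rather than taken from
 * this section):
 *
 *	struct uic_command uic_cmd = {
 *		.command = UIC_CMD_DME_GET,
 *		.argument1 = UIC_ARG_MIB(attr_sel),
 *	};
 *	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 *	if (!ret)
 *		value = uic_cmd.argument3;
 */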
2556
2557 /**
2558  * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2559  * @hba:        per-adapter instance
2560  * @lrbp:       pointer to local reference block
2561  * @sg_entries: The number of SG list entries actually used
2562  * @sg_list:    Pointer to SG list
2563  */
2564 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2565                                struct scatterlist *sg_list)
2566 {
2567         struct ufshcd_sg_entry *prd;
2568         struct scatterlist *sg;
2569         int i;
2570
2571         if (sg_entries) {
2572
2573                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2574                         lrbp->utr_descriptor_ptr->prd_table_length =
2575                                 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2576                 else
2577                         lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2578
2579                 prd = lrbp->ucd_prdt_ptr;
2580
2581                 for_each_sg(sg_list, sg, sg_entries, i) {
2582                         const unsigned int len = sg_dma_len(sg);
2583
2584                         /*
2585                          * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2586                          * based value that indicates the length, in bytes, of
2587                          * the data block. A maximum of length of 256KB may
2588                          * exist for any entry. Bits 1:0 of this field shall be
2589                          * 11b to indicate Dword granularity. A value of '3'
2590                          * indicates 4 bytes, '7' indicates 8 bytes, etc."
2591                          */
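                        /* e.g. a 4 KiB segment is encoded below as len - 1 = 0xFFF */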
2592                         WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2593                         prd->size = cpu_to_le32(len - 1);
2594                         prd->addr = cpu_to_le64(sg->dma_address);
2595                         prd->reserved = 0;
2596                         prd = (void *)prd + ufshcd_sg_entry_size(hba);
2597                 }
2598         } else {
2599                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2600         }
2601 }
2602
2603 /**
2604  * ufshcd_map_sg - Map scatter-gather list to prdt
2605  * @hba: per adapter instance
2606  * @lrbp: pointer to local reference block
2607  *
2608  * Return: 0 in case of success, non-zero value in case of failure.
2609  */
2610 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2611 {
2612         struct scsi_cmnd *cmd = lrbp->cmd;
2613         int sg_segments = scsi_dma_map(cmd);
2614
2615         if (sg_segments < 0)
2616                 return sg_segments;
2617
2618         ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2619
2620         return 0;
2621 }
2622
2623 /**
2624  * ufshcd_enable_intr - enable interrupts
2625  * @hba: per adapter instance
2626  * @intrs: interrupt bits
2627  */
2628 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2629 {
2630         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2631
2632         if (hba->ufs_version == ufshci_version(1, 0)) {
2633                 u32 rw;
2634                 rw = set & INTERRUPT_MASK_RW_VER_10;
2635                 set = rw | ((set ^ intrs) & intrs);
2636         } else {
2637                 set |= intrs;
2638         }
2639
2640         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2641 }
2642
2643 /**
2644  * ufshcd_disable_intr - disable interrupts
2645  * @hba: per adapter instance
2646  * @intrs: interrupt bits
2647  */
2648 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2649 {
2650         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2651
2652         if (hba->ufs_version == ufshci_version(1, 0)) {
2653                 u32 rw;
2654                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2655                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
2656                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2657
2658         } else {
2659                 set &= ~intrs;
2660         }
2661
2662         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2663 }
2664
2665 /**
2666  * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor
2667  * header according to the request
2668  * @lrbp: pointer to local reference block
2669  * @upiu_flags: flags required in the header
2670  * @cmd_dir: requests data direction
2671  * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2672  */
2673 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
2674                                         enum dma_data_direction cmd_dir, int ehs_length)
2675 {
2676         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2677         struct request_desc_header *h = &req_desc->header;
2678         enum utp_data_direction data_direction;
2679
2680         *h = (typeof(*h)){ };
2681
2682         if (cmd_dir == DMA_FROM_DEVICE) {
2683                 data_direction = UTP_DEVICE_TO_HOST;
2684                 *upiu_flags = UPIU_CMD_FLAGS_READ;
2685         } else if (cmd_dir == DMA_TO_DEVICE) {
2686                 data_direction = UTP_HOST_TO_DEVICE;
2687                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2688         } else {
2689                 data_direction = UTP_NO_DATA_TRANSFER;
2690                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2691         }
2692
2693         h->command_type = lrbp->command_type;
2694         h->data_direction = data_direction;
2695         h->ehs_length = ehs_length;
2696
2697         if (lrbp->intr_cmd)
2698                 h->interrupt = 1;
2699
2700         /* Prepare crypto related dwords */
2701         ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2702
2703         /*
2704          * Assign an invalid value to the command status. The controller
2705          * updates OCS with the actual command status on command
2706          * completion.
2707          */
2708         h->ocs = OCS_INVALID_COMMAND_STATUS;
2709
2710         req_desc->prd_table_length = 0;
2711 }
2712
2713 /**
2714  * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
2715  * for SCSI commands
2716  * @lrbp: local reference block pointer
2717  * @upiu_flags: flags
2718  */
2719 static
2720 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2721 {
2722         struct scsi_cmnd *cmd = lrbp->cmd;
2723         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2724         unsigned short cdb_len;
2725
2726         ucd_req_ptr->header = (struct utp_upiu_header){
2727                 .transaction_code = UPIU_TRANSACTION_COMMAND,
2728                 .flags = upiu_flags,
2729                 .lun = lrbp->lun,
2730                 .task_tag = lrbp->task_tag,
2731                 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2732         };
2733
2734         WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
2735
2736         ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2737
2738         cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2739         memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2740         memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2741
2742         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2743 }
2744
2745 /**
2746  * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2747  * @hba: UFS hba
2748  * @lrbp: local reference block pointer
2749  * @upiu_flags: flags
2750  */
2751 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2752                                 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2753 {
2754         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2755         struct ufs_query *query = &hba->dev_cmd.query;
2756         u16 len = be16_to_cpu(query->request.upiu_req.length);
2757
2758         /* Query request header */
2759         ucd_req_ptr->header = (struct utp_upiu_header){
2760                 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2761                 .flags = upiu_flags,
2762                 .lun = lrbp->lun,
2763                 .task_tag = lrbp->task_tag,
2764                 .query_function = query->request.query_func,
2765                 /* Data segment length is only needed for WRITE_DESC */
2766                 .data_segment_length =
2767                         query->request.upiu_req.opcode ==
2768                                         UPIU_QUERY_OPCODE_WRITE_DESC ?
2769                                 cpu_to_be16(len) :
2770                                 0,
2771         };
2772
2773         /* Copy the Query Request buffer as is */
2774         memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2775                         QUERY_OSF_SIZE);
2776
2777         /* Copy the Descriptor */
2778         if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2779                 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2780
2781         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2782 }
2783
2784 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2785 {
2786         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2787
2788         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2789
2790         ucd_req_ptr->header = (struct utp_upiu_header){
2791                 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2792                 .task_tag = lrbp->task_tag,
2793         };
2794
2795         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2796 }
2797
2798 /**
2799  * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2800  *                           for Device Management Purposes
2801  * @hba: per adapter instance
2802  * @lrbp: pointer to local reference block
2803  *
2804  * Return: 0 upon success; < 0 upon failure.
2805  */
2806 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2807                                       struct ufshcd_lrb *lrbp)
2808 {
2809         u8 upiu_flags;
2810         int ret = 0;
2811
2812         if (hba->ufs_version <= ufshci_version(1, 1))
2813                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2814         else
2815                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2816
2817         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
2818         if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2819                 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2820         else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2821                 ufshcd_prepare_utp_nop_upiu(lrbp);
2822         else
2823                 ret = -EINVAL;
2824
2825         return ret;
2826 }
2827
2828 /**
2829  * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2830  *                         for SCSI Purposes
2831  * @hba: per adapter instance
2832  * @lrbp: pointer to local reference block
2833  */
2834 static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2835 {
2836         struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
2837         unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
2838         u8 upiu_flags;
2839
2840         if (hba->ufs_version <= ufshci_version(1, 1))
2841                 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2842         else
2843                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2844
2845         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2846                                     lrbp->cmd->sc_data_direction, 0);
2847         if (ioprio_class == IOPRIO_CLASS_RT)
2848                 upiu_flags |= UPIU_CMD_FLAGS_CP;
2849         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2850 }
2851
2852 /**
2853  * ufshcd_upiu_wlun_to_scsi_wlun - maps a UPIU W-LUN ID to a SCSI W-LUN ID
2854  * @upiu_wlun_id: UPIU W-LUN id
2855  *
2856  * Return: SCSI W-LUN id.
2857  */
2858 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2859 {
2860         return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2861 }
2862
2863 static inline bool is_device_wlun(struct scsi_device *sdev)
2864 {
2865         return sdev->lun ==
2866                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2867 }
2868
2869 /*
2870  * Associate the UFS controller queue with the default and poll HCTX types.
2871  * Initialize the mq_map[] arrays.
2872  */
2873 static void ufshcd_map_queues(struct Scsi_Host *shost)
2874 {
2875         struct ufs_hba *hba = shost_priv(shost);
2876         int i, queue_offset = 0;
2877
2878         if (!is_mcq_supported(hba)) {
2879                 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2880                 hba->nr_queues[HCTX_TYPE_READ] = 0;
2881                 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2882                 hba->nr_hw_queues = 1;
2883         }
2884
2885         for (i = 0; i < shost->nr_maps; i++) {
2886                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2887
2888                 map->nr_queues = hba->nr_queues[i];
2889                 if (!map->nr_queues)
2890                         continue;
2891                 map->queue_offset = queue_offset;
2892                 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2893                         map->queue_offset = 0;
2894
2895                 blk_mq_map_queues(map);
2896                 queue_offset += map->nr_queues;
2897         }
2898 }
2899
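/*
 * Note (derived from the mapping above): without MCQ there is a single
 * hardware queue, so both the default and the poll map point at queue 0;
 * with MCQ each HCTX type with a non-zero queue count gets its own
 * contiguous range of hardware queues via queue_offset.
 */
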
2900 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2901 {
2902         struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2903                 i * ufshcd_get_ucd_size(hba);
2904         struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2905         dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2906                 i * ufshcd_get_ucd_size(hba);
2907         u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2908                                        response_upiu);
2909         u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2910
2911         lrb->utr_descriptor_ptr = utrdlp + i;
2912         lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2913                 i * sizeof(struct utp_transfer_req_desc);
2914         lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2915         lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2916         lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2917         lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2918         lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2919         lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2920 }
2921
2922 /**
2923  * ufshcd_queuecommand - main entry point for SCSI requests
2924  * @host: SCSI host pointer
2925  * @cmd: command from SCSI Midlayer
2926  *
2927  * Return: 0 for success, non-zero in case of failure.
2928  */
2929 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2930 {
2931         struct ufs_hba *hba = shost_priv(host);
2932         int tag = scsi_cmd_to_rq(cmd)->tag;
2933         struct ufshcd_lrb *lrbp;
2934         int err = 0;
2935         struct ufs_hw_queue *hwq = NULL;
2936
2937         switch (hba->ufshcd_state) {
2938         case UFSHCD_STATE_OPERATIONAL:
2939                 break;
2940         case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2941                 /*
2942                  * SCSI error handler can call ->queuecommand() while UFS error
2943                  * handler is in progress. Error interrupts could change the
2944                  * state from UFSHCD_STATE_RESET to
2945                  * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
2946                  * from being issued in that case.
2947                  */
2948                 if (ufshcd_eh_in_progress(hba)) {
2949                         err = SCSI_MLQUEUE_HOST_BUSY;
2950                         goto out;
2951                 }
2952                 break;
2953         case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2954                 /*
2955                  * pm_runtime_get_sync() is used at the error handling preparation
2956                  * stage. If a SCSI command, e.g. the SSU command, is sent from the
2957                  * hba's PM ops, it can never finish if we let the SCSI layer keep
2958                  * retrying it, which leaves the error handler stuck forever. Neither
2959                  * can we let the SCSI command pass through, because UFS is in a bad
2960                  * state and the command may eventually time out, which would block
2961                  * the error handler for too long. So just fail the SCSI command
2962                  * sent from PM ops; the error handler can recover the PM error anyway.
2963                  */
2964                 if (hba->pm_op_in_progress) {
2965                         hba->force_reset = true;
2966                         set_host_byte(cmd, DID_BAD_TARGET);
2967                         scsi_done(cmd);
2968                         goto out;
2969                 }
2970                 fallthrough;
2971         case UFSHCD_STATE_RESET:
2972                 err = SCSI_MLQUEUE_HOST_BUSY;
2973                 goto out;
2974         case UFSHCD_STATE_ERROR:
2975                 set_host_byte(cmd, DID_ERROR);
2976                 scsi_done(cmd);
2977                 goto out;
2978         }
2979
2980         hba->req_abort_count = 0;
2981
2982         ufshcd_hold(hba);
2983
2984         lrbp = &hba->lrb[tag];
2985         lrbp->cmd = cmd;
2986         lrbp->task_tag = tag;
2987         lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2988         lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2989
2990         ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
2991
2992         lrbp->req_abort_skip = false;
2993
2994         ufshcd_comp_scsi_upiu(hba, lrbp);
2995
2996         err = ufshcd_map_sg(hba, lrbp);
2997         if (err) {
2998                 ufshcd_release(hba);
2999                 goto out;
3000         }
3001
3002         if (is_mcq_enabled(hba))
3003                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
3004
3005         ufshcd_send_command(hba, tag, hwq);
3006
3007 out:
3008         if (ufs_trigger_eh(hba)) {
3009                 unsigned long flags;
3010
3011                 spin_lock_irqsave(hba->host->host_lock, flags);
3012                 ufshcd_schedule_eh_work(hba);
3013                 spin_unlock_irqrestore(hba->host->host_lock, flags);
3014         }
3015
3016         return err;
3017 }
3018
3019 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3020                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3021 {
3022         lrbp->cmd = NULL;
3023         lrbp->task_tag = tag;
3024         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
3025         lrbp->intr_cmd = true; /* No interrupt aggregation */
3026         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
3027         hba->dev_cmd.type = cmd_type;
3028
3029         return ufshcd_compose_devman_upiu(hba, lrbp);
3030 }
3031
3032 /**
3033  * ufshcd_cmd_inflight - check with the block layer if the command is in flight
3034  * @cmd: command to check.
3035  *
3036  * Return: true if the command is in flight; false if not.
3037  */
3038 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
3039 {
3040         struct request *rq;
3041
3042         if (!cmd)
3043                 return false;
3044
3045         rq = scsi_cmd_to_rq(cmd);
3046         if (!blk_mq_request_started(rq))
3047                 return false;
3048
3049         return true;
3050 }
3051
3052 /**
3053  * ufshcd_clear_cmd - clear the pending command in the controller and wait
3054  * until the controller confirms that the command has been cleared.
3055  * @hba: per adapter instance
3056  * @task_tag: The tag number of the command to be cleared.
3057  */
3058 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
3059 {
3060         u32 mask;
3061         unsigned long flags;
3062         int err;
3063
3064         if (is_mcq_enabled(hba)) {
3065                 /*
3066                  * MCQ mode. Clean up the MCQ resources similarly to
3067                  * what ufshcd_utrl_clear() does for SDB mode.
3068                  */
3069                 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
3070                 if (err) {
3071                         dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
3072                                 __func__, task_tag, err);
3073                         return err;
3074                 }
3075                 return 0;
3076         }
3077
3078         mask = 1U << task_tag;
3079
3080         /* clear outstanding transaction before retry */
3081         spin_lock_irqsave(hba->host->host_lock, flags);
3082         ufshcd_utrl_clear(hba, mask);
3083         spin_unlock_irqrestore(hba->host->host_lock, flags);
3084
3085         /*
3086          * Wait for the hardware to clear the corresponding doorbell bit;
3087          * the maximum wait is one second.
3088          */
3089         return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3090                                         mask, ~mask, 1000, 1000);
3091 }
3092
3093 /**
3094  * ufshcd_dev_cmd_completion() - handles device management command responses
3095  * @hba: per adapter instance
3096  * @lrbp: pointer to local reference block
3097  *
3098  * Return: 0 upon success; < 0 upon failure.
3099  */
3100 static int
3101 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3102 {
3103         enum upiu_response_transaction resp;
3104         int err = 0;
3105
3106         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3107         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3108
3109         switch (resp) {
3110         case UPIU_TRANSACTION_NOP_IN:
3111                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3112                         err = -EINVAL;
3113                         dev_err(hba->dev, "%s: unexpected response %x\n",
3114                                         __func__, resp);
3115                 }
3116                 break;
3117         case UPIU_TRANSACTION_QUERY_RSP: {
3118                 u8 response = lrbp->ucd_rsp_ptr->header.response;
3119
3120                 if (response == 0)
3121                         err = ufshcd_copy_query_response(hba, lrbp);
3122                 break;
3123         }
3124         case UPIU_TRANSACTION_REJECT_UPIU:
3125                 /* TODO: handle Reject UPIU Response */
3126                 err = -EPERM;
3127                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3128                                 __func__);
3129                 break;
3130         case UPIU_TRANSACTION_RESPONSE:
3131                 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3132                         err = -EINVAL;
3133                         dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3134                 }
3135                 break;
3136         default:
3137                 err = -EINVAL;
3138                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3139                                 __func__, resp);
3140                 break;
3141         }
3142
3143         return err;
3144 }
3145
3146 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3147                 struct ufshcd_lrb *lrbp, int max_timeout)
3148 {
3149         unsigned long time_left = msecs_to_jiffies(max_timeout);
3150         unsigned long flags;
3151         bool pending;
3152         int err;
3153
3154 retry:
3155         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
3156                                                 time_left);
3157
3158         if (likely(time_left)) {
3159                 /*
3160                  * The completion handler called complete() and the caller of
3161                  * this function still owns the @lrbp tag so the code below does
3162                  * not trigger any race conditions.
3163                  */
3164                 hba->dev_cmd.complete = NULL;
3165                 err = ufshcd_get_tr_ocs(lrbp, NULL);
3166                 if (!err)
3167                         err = ufshcd_dev_cmd_completion(hba, lrbp);
3168         } else {
3169                 err = -ETIMEDOUT;
3170                 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3171                         __func__, lrbp->task_tag);
3172
3173                 /* MCQ mode */
3174                 if (is_mcq_enabled(hba)) {
3175                         err = ufshcd_clear_cmd(hba, lrbp->task_tag);
3176                         hba->dev_cmd.complete = NULL;
3177                         return err;
3178                 }
3179
3180                 /* SDB mode */
3181                 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3182                         /* successfully cleared the command, retry if needed */
3183                         err = -EAGAIN;
3184                         /*
3185                          * Since clearing the command succeeded we also need to
3186                          * clear the task tag bit from the outstanding_reqs
3187                          * variable.
3188                          */
3189                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3190                         pending = test_bit(lrbp->task_tag,
3191                                            &hba->outstanding_reqs);
3192                         if (pending) {
3193                                 hba->dev_cmd.complete = NULL;
3194                                 __clear_bit(lrbp->task_tag,
3195                                             &hba->outstanding_reqs);
3196                         }
3197                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3198
3199                         if (!pending) {
3200                                 /*
3201                                  * The completion handler ran while we tried to
3202                                  * clear the command.
3203                                  */
3204                                 time_left = 1;
3205                                 goto retry;
3206                         }
3207                 } else {
3208                         dev_err(hba->dev, "%s: failed to clear tag %d\n",
3209                                 __func__, lrbp->task_tag);
3210
3211                         spin_lock_irqsave(&hba->outstanding_lock, flags);
3212                         pending = test_bit(lrbp->task_tag,
3213                                            &hba->outstanding_reqs);
3214                         if (pending)
3215                                 hba->dev_cmd.complete = NULL;
3216                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3217
3218                         if (!pending) {
3219                                 /*
3220                                  * The completion handler ran while we tried to
3221                                  * clear the command.
3222                                  */
3223                                 time_left = 1;
3224                                 goto retry;
3225                         }
3226                 }
3227         }
3228
3229         return err;
3230 }
3231
3232 /**
3233  * ufshcd_exec_dev_cmd - API for sending device management requests
3234  * @hba: UFS hba
3235  * @cmd_type: specifies the type (NOP, Query...)
3236  * @timeout: timeout in milliseconds
3237  *
3238  * Return: 0 upon success; < 0 upon failure.
3239  *
3240  * NOTE: Since there is only one available tag for device management commands,
3241  * the caller is expected to hold the hba->dev_cmd.lock mutex.
3242  */
3243 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3244                 enum dev_cmd_type cmd_type, int timeout)
3245 {
3246         DECLARE_COMPLETION_ONSTACK(wait);
3247         const u32 tag = hba->reserved_slot;
3248         struct ufshcd_lrb *lrbp;
3249         int err;
3250
3251         /* Protects use of hba->reserved_slot. */
3252         lockdep_assert_held(&hba->dev_cmd.lock);
3253
3254         down_read(&hba->clk_scaling_lock);
3255
3256         lrbp = &hba->lrb[tag];
3257         lrbp->cmd = NULL;
3258         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3259         if (unlikely(err))
3260                 goto out;
3261
3262         hba->dev_cmd.complete = &wait;
3263
3264         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3265
3266         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3267         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3268         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3269                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3270
3271 out:
3272         up_read(&hba->clk_scaling_lock);
3273         return err;
3274 }
3275
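/*
 * Usage sketch (illustrative only, not called from here): a NOP OUT issued
 * through ufshcd_exec_dev_cmd() must be serialized on dev_cmd.lock and
 * bracketed by ufshcd_hold()/ufshcd_release(), roughly:
 *
 *	ufshcd_hold(hba);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */
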
3276 /**
3277  * ufshcd_init_query() - init the query response and request parameters
3278  * @hba: per-adapter instance
3279  * @request: address of the request pointer to be initialized
3280  * @response: address of the response pointer to be initialized
3281  * @opcode: operation to perform
3282  * @idn: flag idn to access
3283  * @index: LU number to access
3284  * @selector: query/flag/descriptor further identification
3285  */
3286 static inline void ufshcd_init_query(struct ufs_hba *hba,
3287                 struct ufs_query_req **request, struct ufs_query_res **response,
3288                 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3289 {
3290         *request = &hba->dev_cmd.query.request;
3291         *response = &hba->dev_cmd.query.response;
3292         memset(*request, 0, sizeof(struct ufs_query_req));
3293         memset(*response, 0, sizeof(struct ufs_query_res));
3294         (*request)->upiu_req.opcode = opcode;
3295         (*request)->upiu_req.idn = idn;
3296         (*request)->upiu_req.index = index;
3297         (*request)->upiu_req.selector = selector;
3298 }
3299
3300 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3301         enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3302 {
3303         int ret;
3304         int retries;
3305
3306         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3307                 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3308                 if (ret)
3309                         dev_dbg(hba->dev,
3310                                 "%s: failed with error %d, retries %d\n",
3311                                 __func__, ret, retries);
3312                 else
3313                         break;
3314         }
3315
3316         if (ret)
3317                 dev_err(hba->dev,
3318                         "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3319                         __func__, opcode, idn, ret, retries);
3320         return ret;
3321 }
3322
3323 /**
3324  * ufshcd_query_flag() - API function for sending flag query requests
3325  * @hba: per-adapter instance
3326  * @opcode: flag query to perform
3327  * @idn: flag idn to access
3328  * @index: flag index to access
3329  * @flag_res: the flag value after the query request completes
3330  *
3331  * Return: 0 for success, non-zero in case of failure.
3332  */
3333 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3334                         enum flag_idn idn, u8 index, bool *flag_res)
3335 {
3336         struct ufs_query_req *request = NULL;
3337         struct ufs_query_res *response = NULL;
3338         int err, selector = 0;
3339         int timeout = QUERY_REQ_TIMEOUT;
3340
3341         BUG_ON(!hba);
3342
3343         ufshcd_hold(hba);
3344         mutex_lock(&hba->dev_cmd.lock);
3345         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3346                         selector);
3347
3348         switch (opcode) {
3349         case UPIU_QUERY_OPCODE_SET_FLAG:
3350         case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3351         case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3352                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3353                 break;
3354         case UPIU_QUERY_OPCODE_READ_FLAG:
3355                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3356                 if (!flag_res) {
3357                         /* No dummy reads */
3358                         dev_err(hba->dev, "%s: Invalid argument for read request\n",
3359                                         __func__);
3360                         err = -EINVAL;
3361                         goto out_unlock;
3362                 }
3363                 break;
3364         default:
3365                 dev_err(hba->dev,
3366                         "%s: Expected query flag opcode but got = %d\n",
3367                         __func__, opcode);
3368                 err = -EINVAL;
3369                 goto out_unlock;
3370         }
3371
3372         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3373
3374         if (err) {
3375                 dev_err(hba->dev,
3376                         "%s: Sending flag query for idn %d failed, err = %d\n",
3377                         __func__, idn, err);
3378                 goto out_unlock;
3379         }
3380
3381         if (flag_res)
3382                 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3383                                 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3384
3385 out_unlock:
3386         mutex_unlock(&hba->dev_cmd.lock);
3387         ufshcd_release(hba);
3388         return err;
3389 }
3390
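/*
 * Illustrative helper (a sketch, not used by the driver): how a caller could
 * read the fDeviceInit flag through ufshcd_query_flag(). The flag IDN is
 * assumed to come from ufs.h; the helper name is hypothetical.
 */
static inline int ufshcd_example_read_fdeviceinit(struct ufs_hba *hba,
                                                  bool *flag_res)
{
        /* READ_FLAG requires a non-NULL flag_res to receive the flag value. */
        return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                 QUERY_FLAG_IDN_FDEVICEINIT, 0, flag_res);
}
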
3391 /**
3392  * ufshcd_query_attr - API function for sending attribute requests
3393  * @hba: per-adapter instance
3394  * @opcode: attribute opcode
3395  * @idn: attribute idn to access
3396  * @index: index field
3397  * @selector: selector field
3398  * @attr_val: the attribute value after the query request completes
3399  *
3400  * Return: 0 for success, non-zero in case of failure.
3401  */
3402 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3403                       enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3404 {
3405         struct ufs_query_req *request = NULL;
3406         struct ufs_query_res *response = NULL;
3407         int err;
3408
3409         BUG_ON(!hba);
3410
3411         if (!attr_val) {
3412                 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3413                                 __func__, opcode);
3414                 return -EINVAL;
3415         }
3416
3417         ufshcd_hold(hba);
3418
3419         mutex_lock(&hba->dev_cmd.lock);
3420         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3421                         selector);
3422
3423         switch (opcode) {
3424         case UPIU_QUERY_OPCODE_WRITE_ATTR:
3425                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3426                 request->upiu_req.value = cpu_to_be32(*attr_val);
3427                 break;
3428         case UPIU_QUERY_OPCODE_READ_ATTR:
3429                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3430                 break;
3431         default:
3432                 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3433                                 __func__, opcode);
3434                 err = -EINVAL;
3435                 goto out_unlock;
3436         }
3437
3438         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3439
3440         if (err) {
3441                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3442                                 __func__, opcode, idn, index, err);
3443                 goto out_unlock;
3444         }
3445
3446         *attr_val = be32_to_cpu(response->upiu_res.value);
3447
3448 out_unlock:
3449         mutex_unlock(&hba->dev_cmd.lock);
3450         ufshcd_release(hba);
3451         return err;
3452 }
3453
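/*
 * Usage sketch (illustrative only; attribute IDN assumed from ufs.h): writing
 * an attribute, e.g. bActiveICCLevel, passes the value through *attr_val:
 *
 *	u32 icc_level = 0;
 *	int err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
 *				    QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
 *				    &icc_level);
 */
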
3454 /**
3455  * ufshcd_query_attr_retry() - API function for sending query
3456  * attribute with retries
3457  * @hba: per-adapter instance
3458  * @opcode: attribute opcode
3459  * @idn: attribute idn to access
3460  * @index: index field
3461  * @selector: selector field
3462  * @attr_val: the attribute value after the query request
3463  * completes
3464  *
3465  * Return: 0 for success, non-zero in case of failure.
3466  */
3467 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3468         enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3469         u32 *attr_val)
3470 {
3471         int ret = 0;
3472         u32 retries;
3473
3474         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3475                 ret = ufshcd_query_attr(hba, opcode, idn, index,
3476                                                 selector, attr_val);
3477                 if (ret)
3478                         dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3479                                 __func__, ret, retries);
3480                 else
3481                         break;
3482         }
3483
3484         if (ret)
3485                 dev_err(hba->dev,
3486                         "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3487                         __func__, idn, ret, QUERY_REQ_RETRIES);
3488         return ret;
3489 }
3490
3491 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3492                         enum query_opcode opcode, enum desc_idn idn, u8 index,
3493                         u8 selector, u8 *desc_buf, int *buf_len)
3494 {
3495         struct ufs_query_req *request = NULL;
3496         struct ufs_query_res *response = NULL;
3497         int err;
3498
3499         BUG_ON(!hba);
3500
3501         if (!desc_buf) {
3502                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3503                                 __func__, opcode);
3504                 return -EINVAL;
3505         }
3506
3507         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3508                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3509                                 __func__, *buf_len);
3510                 return -EINVAL;
3511         }
3512
3513         ufshcd_hold(hba);
3514
3515         mutex_lock(&hba->dev_cmd.lock);
3516         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3517                         selector);
3518         hba->dev_cmd.query.descriptor = desc_buf;
3519         request->upiu_req.length = cpu_to_be16(*buf_len);
3520
3521         switch (opcode) {
3522         case UPIU_QUERY_OPCODE_WRITE_DESC:
3523                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3524                 break;
3525         case UPIU_QUERY_OPCODE_READ_DESC:
3526                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3527                 break;
3528         default:
3529                 dev_err(hba->dev,
3530                                 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3531                                 __func__, opcode);
3532                 err = -EINVAL;
3533                 goto out_unlock;
3534         }
3535
3536         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3537
3538         if (err) {
3539                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3540                                 __func__, opcode, idn, index, err);
3541                 goto out_unlock;
3542         }
3543
3544         *buf_len = be16_to_cpu(response->upiu_res.length);
3545
3546 out_unlock:
3547         hba->dev_cmd.query.descriptor = NULL;
3548         mutex_unlock(&hba->dev_cmd.lock);
3549         ufshcd_release(hba);
3550         return err;
3551 }
3552
3553 /**
3554  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3555  * @hba: per-adapter instance
3556  * @opcode: attribute opcode
3557  * @idn: attribute idn to access
3558  * @index: index field
3559  * @selector: selector field
3560  * @desc_buf: the buffer that contains the descriptor
3561  * @buf_len: length parameter passed to the device
3562  *
3563  * The buf_len parameter will contain, on return, the length parameter
3564  * received in the response.
3565  *
3566  * Return: 0 for success, non-zero in case of failure.
3567  */
3568 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3569                                   enum query_opcode opcode,
3570                                   enum desc_idn idn, u8 index,
3571                                   u8 selector,
3572                                   u8 *desc_buf, int *buf_len)
3573 {
3574         int err;
3575         int retries;
3576
3577         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3578                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3579                                                 selector, desc_buf, buf_len);
3580                 if (!err || err == -EINVAL)
3581                         break;
3582         }
3583
3584         return err;
3585 }
3586
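/*
 * Usage sketch (illustrative only; descriptor IDN assumed from ufs.h):
 * reading a whole descriptor, e.g. the device descriptor; on return buf_len
 * holds the length reported by the device:
 *
 *	u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *	int buf_len = QUERY_DESC_MAX_SIZE;
 *	int err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *						QUERY_DESC_IDN_DEVICE, 0, 0,
 *						desc_buf, &buf_len);
 */
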
3587 /**
3588  * ufshcd_read_desc_param - read the specified descriptor parameter
3589  * @hba: Pointer to adapter instance
3590  * @desc_id: descriptor idn value
3591  * @desc_index: descriptor index
3592  * @param_offset: offset of the parameter to read
3593  * @param_read_buf: pointer to buffer where parameter would be read
3594  * @param_size: sizeof(param_read_buf)
3595  *
3596  * Return: 0 in case of success, non-zero otherwise.
3597  */
3598 int ufshcd_read_desc_param(struct ufs_hba *hba,
3599                            enum desc_idn desc_id,
3600                            int desc_index,
3601                            u8 param_offset,
3602                            u8 *param_read_buf,
3603                            u8 param_size)
3604 {
3605         int ret;
3606         u8 *desc_buf;
3607         int buff_len = QUERY_DESC_MAX_SIZE;
3608         bool is_kmalloc = true;
3609
3610         /* Safety check */
3611         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3612                 return -EINVAL;
3613
3614         /* Check whether we need temp memory */
3615         if (param_offset != 0 || param_size < buff_len) {
3616                 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3617                 if (!desc_buf)
3618                         return -ENOMEM;
3619         } else {
3620                 desc_buf = param_read_buf;
3621                 is_kmalloc = false;
3622         }
3623
3624         /* Request for full descriptor */
3625         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3626                                             desc_id, desc_index, 0,
3627                                             desc_buf, &buff_len);
3628         if (ret) {
3629                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3630                         __func__, desc_id, desc_index, param_offset, ret);
3631                 goto out;
3632         }
3633
3634         /* Update descriptor length */
3635         buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3636
3637         if (param_offset >= buff_len) {
3638                 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3639                         __func__, param_offset, desc_id, buff_len);
3640                 ret = -EINVAL;
3641                 goto out;
3642         }
3643
3644         /* Sanity check */
3645         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3646                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3647                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3648                 ret = -EINVAL;
3649                 goto out;
3650         }
3651
3652         if (is_kmalloc) {
3653                 /* Make sure we don't copy more data than available */
3654                 if (param_offset >= buff_len)
3655                         ret = -EINVAL;
3656                 else
3657                         memcpy(param_read_buf, &desc_buf[param_offset],
3658                                min_t(u32, param_size, buff_len - param_offset));
3659         }
3660 out:
3661         if (is_kmalloc)
3662                 kfree(desc_buf);
3663         return ret;
3664 }
3665
3666 /**
3667  * struct uc_string_id - unicode string
3668  *
3669  * @len: size of this descriptor, including the header
3670  * @type: descriptor type
3671  * @uc: unicode string characters
3672  */
3673 struct uc_string_id {
3674         u8 len;
3675         u8 type;
3676         wchar_t uc[];
3677 } __packed;
3678
3679 /* replace non-printable or non-ASCII characters with spaces */
3680 static inline char ufshcd_remove_non_printable(u8 ch)
3681 {
3682         return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3683 }
3684
3685 /**
3686  * ufshcd_read_string_desc - read string descriptor
3687  * @hba: pointer to adapter instance
3688  * @desc_index: descriptor index
3689  * @buf: pointer to the buffer where the descriptor will be read;
3690  *       the caller should free the memory.
3691  * @ascii: if true, convert from Unicode to a NUL-terminated
3692  *         ASCII string.
3693  *
3694  * Return:
3695  * *      string size on success.
3696  * *      -ENOMEM: on allocation failure
3697  * *      -EINVAL: on a wrong parameter
3698  */
3699 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3700                             u8 **buf, bool ascii)
3701 {
3702         struct uc_string_id *uc_str;
3703         u8 *str;
3704         int ret;
3705
3706         if (!buf)
3707                 return -EINVAL;
3708
3709         uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3710         if (!uc_str)
3711                 return -ENOMEM;
3712
3713         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3714                                      (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3715         if (ret < 0) {
3716                 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3717                         QUERY_REQ_RETRIES, ret);
3718                 str = NULL;
3719                 goto out;
3720         }
3721
3722         if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3723                 dev_dbg(hba->dev, "String Desc is of zero length\n");
3724                 str = NULL;
3725                 ret = 0;
3726                 goto out;
3727         }
3728
3729         if (ascii) {
3730                 ssize_t ascii_len;
3731                 int i;
3732                 /* strip the header, halve the UTF-16 byte count, add one for the NUL */
3733                 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3734                 str = kzalloc(ascii_len, GFP_KERNEL);
3735                 if (!str) {
3736                         ret = -ENOMEM;
3737                         goto out;
3738                 }
3739
3740                 /*
3741                  * The descriptor contains the string in UTF-16 format;
3742                  * convert it to UTF-8 so it can be displayed.
3743                  */
3744                 ret = utf16s_to_utf8s(uc_str->uc,
3745                                       uc_str->len - QUERY_DESC_HDR_SIZE,
3746                                       UTF16_BIG_ENDIAN, str, ascii_len - 1);
3747
3748                 /* replace non-printable or non-ASCII characters with spaces */
3749                 for (i = 0; i < ret; i++)
3750                         str[i] = ufshcd_remove_non_printable(str[i]);
3751
3752                 str[ret++] = '\0';
3753
3754         } else {
3755                 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3756                 if (!str) {
3757                         ret = -ENOMEM;
3758                         goto out;
3759                 }
3760                 ret = uc_str->len;
3761         }
3762 out:
3763         *buf = str;
3764         kfree(uc_str);
3765         return ret;
3766 }
3767
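/*
 * Usage sketch (illustrative only): desc_index would typically come from a
 * device descriptor field such as the product name index; the returned
 * buffer must be freed by the caller:
 *
 *	u8 *model = NULL;
 *	int len = ufshcd_read_string_desc(hba, model_index, &model, true);
 *
 *	if (len > 0)
 *		dev_info(hba->dev, "model: %s\n", model);
 *	kfree(model);
 */
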
3768 /**
3769  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3770  * @hba: Pointer to adapter instance
3771  * @lun: lun id
3772  * @param_offset: offset of the parameter to read
3773  * @param_read_buf: pointer to buffer where parameter would be read
3774  * @param_size: sizeof(param_read_buf)
3775  *
3776  * Return: 0 in case of success, non-zero otherwise.
3777  */
3778 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3779                                               int lun,
3780                                               enum unit_desc_param param_offset,
3781                                               u8 *param_read_buf,
3782                                               u32 param_size)
3783 {
3784         /*
3785          * Unit descriptors are only available for general purpose LUs (LUN id
3786          * from 0 to 7) and RPMB Well known LU.
3787          */
3788         if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3789                 return -EOPNOTSUPP;
3790
3791         return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3792                                       param_offset, param_read_buf, param_size);
3793 }
3794
3795 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3796 {
3797         int err = 0;
3798         u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3799
3800         if (hba->dev_info.wspecversion >= 0x300) {
3801                 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3802                                 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3803                                 &gating_wait);
3804                 if (err)
3805                         dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3806                                          err, gating_wait);
3807
3808                 if (gating_wait == 0) {
3809                         gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3810                         dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3811                                          gating_wait);
3812                 }
3813
3814                 hba->dev_info.clk_gating_wait_us = gating_wait;
3815         }
3816
3817         return err;
3818 }
3819
3820 /**
3821  * ufshcd_memory_alloc - allocate memory for host memory space data structures
3822  * @hba: per adapter instance
3823  *
3824  * 1. Allocate DMA memory for Command Descriptor array
3825  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3826  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3827  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3828  *      (UTMRDL)
3829  * 4. Allocate memory for local reference block(lrb).
3830  *
3831  * Return: 0 for success, non-zero in case of failure.
3832  */
3833 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3834 {
3835         size_t utmrdl_size, utrdl_size, ucdl_size;
3836
3837         /* Allocate memory for UTP command descriptors */
3838         ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3839         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3840                                                   ucdl_size,
3841                                                   &hba->ucdl_dma_addr,
3842                                                   GFP_KERNEL);
3843
3844         /*
3845          * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3846          */
3847         if (!hba->ucdl_base_addr ||
3848             WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3849                 dev_err(hba->dev,
3850                         "Command Descriptor Memory allocation failed\n");
3851                 goto out;
3852         }
3853
3854         /*
3855          * Allocate memory for UTP Transfer descriptors;
3856          * UFSHCI requires 1 KB alignment of the UTRD.
3857          */
3858         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3859         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3860                                                    utrdl_size,
3861                                                    &hba->utrdl_dma_addr,
3862                                                    GFP_KERNEL);
3863         if (!hba->utrdl_base_addr ||
3864             WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3865                 dev_err(hba->dev,
3866                         "Transfer Descriptor Memory allocation failed\n");
3867                 goto out;
3868         }
3869
3870         /*
3871          * Skip utmrdl allocation; it may have been
3872          * allocated during the first pass and not released during
3873          * MCQ memory allocation.
3874          * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3875          */
3876         if (hba->utmrdl_base_addr)
3877                 goto skip_utmrdl;
3878         /*
3879          * Allocate memory for UTP Task Management descriptors;
3880          * UFSHCI requires 1 KB alignment of the UTMRD.
3881          */
3882         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3883         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3884                                                     utmrdl_size,
3885                                                     &hba->utmrdl_dma_addr,
3886                                                     GFP_KERNEL);
3887         if (!hba->utmrdl_base_addr ||
3888             WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3889                 dev_err(hba->dev,
3890                 "Task Management Descriptor Memory allocation failed\n");
3891                 goto out;
3892         }
3893
3894 skip_utmrdl:
3895         /* Allocate memory for local reference block */
3896         hba->lrb = devm_kcalloc(hba->dev,
3897                                 hba->nutrs, sizeof(struct ufshcd_lrb),
3898                                 GFP_KERNEL);
3899         if (!hba->lrb) {
3900                 dev_err(hba->dev, "LRB Memory allocation failed\n");
3901                 goto out;
3902         }
3903         return 0;
3904 out:
3905         return -ENOMEM;
3906 }
3907
3908 /**
3909  * ufshcd_host_memory_configure - configure local reference block with
3910  *                              memory offsets
3911  * @hba: per adapter instance
3912  *
3913  * Configure Host memory space
3914  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3915  * address.
3916  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3917  * and PRDT offset.
3918  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3919  * into local reference block.
3920  */
3921 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3922 {
3923         struct utp_transfer_req_desc *utrdlp;
3924         dma_addr_t cmd_desc_dma_addr;
3925         dma_addr_t cmd_desc_element_addr;
3926         u16 response_offset;
3927         u16 prdt_offset;
3928         int cmd_desc_size;
3929         int i;
3930
3931         utrdlp = hba->utrdl_base_addr;
3932
3933         response_offset =
3934                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3935         prdt_offset =
3936                 offsetof(struct utp_transfer_cmd_desc, prd_table);
3937
3938         cmd_desc_size = ufshcd_get_ucd_size(hba);
3939         cmd_desc_dma_addr = hba->ucdl_dma_addr;
3940
3941         for (i = 0; i < hba->nutrs; i++) {
3942                 /* Configure UTRD with command descriptor base address */
3943                 cmd_desc_element_addr =
3944                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
3945                 utrdlp[i].command_desc_base_addr =
3946                                 cpu_to_le64(cmd_desc_element_addr);
3947
3948                 /* Response UPIU and PRDT offsets should be in double words */
3949                 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3950                         utrdlp[i].response_upiu_offset =
3951                                 cpu_to_le16(response_offset);
3952                         utrdlp[i].prd_table_offset =
3953                                 cpu_to_le16(prdt_offset);
3954                         utrdlp[i].response_upiu_length =
3955                                 cpu_to_le16(ALIGNED_UPIU_SIZE);
3956                 } else {
3957                         utrdlp[i].response_upiu_offset =
3958                                 cpu_to_le16(response_offset >> 2);
3959                         utrdlp[i].prd_table_offset =
3960                                 cpu_to_le16(prdt_offset >> 2);
3961                         utrdlp[i].response_upiu_length =
3962                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3963                 }
3964
3965                 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3966         }
3967 }
3968
3969 /**
3970  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3971  * @hba: per adapter instance
3972  *
3973  * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
3974  * in order to initialize the UniPro link startup procedure.
3975  * Once the UniPro links are up, the device connected to the controller
3976  * is detected.
3977  *
3978  * Return: 0 on success, non-zero value on failure.
3979  */
3980 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3981 {
3982         struct uic_command uic_cmd = {0};
3983         int ret;
3984
3985         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3986
3987         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3988         if (ret)
3989                 dev_dbg(hba->dev,
3990                         "dme-link-startup: error code %d\n", ret);
3991         return ret;
3992 }
3993 /**
3994  * ufshcd_dme_reset - UIC command for DME_RESET
3995  * @hba: per adapter instance
3996  *
3997  * The DME_RESET command is issued in order to reset the UniPro stack.
3998  * This function handles a cold reset.
3999  *
4000  * Return: 0 on success, non-zero value on failure.
4001  */
4002 static int ufshcd_dme_reset(struct ufs_hba *hba)
4003 {
4004         struct uic_command uic_cmd = {0};
4005         int ret;
4006
4007         uic_cmd.command = UIC_CMD_DME_RESET;
4008
4009         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4010         if (ret)
4011                 dev_err(hba->dev,
4012                         "dme-reset: error code %d\n", ret);
4013
4014         return ret;
4015 }
4016
4017 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
4018                                int agreed_gear,
4019                                int adapt_val)
4020 {
4021         int ret;
4022
4023         if (agreed_gear < UFS_HS_G4)
4024                 adapt_val = PA_NO_ADAPT;
4025
4026         ret = ufshcd_dme_set(hba,
4027                              UIC_ARG_MIB(PA_TXHSADAPTTYPE),
4028                              adapt_val);
4029         return ret;
4030 }
4031 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
4032
4033 /**
4034  * ufshcd_dme_enable - UIC command for DME_ENABLE
4035  * @hba: per adapter instance
4036  *
4037  * The DME_ENABLE command is issued in order to enable the UniPro stack.
4038  *
4039  * Return: 0 on success, non-zero value on failure.
4040  */
4041 static int ufshcd_dme_enable(struct ufs_hba *hba)
4042 {
4043         struct uic_command uic_cmd = {0};
4044         int ret;
4045
4046         uic_cmd.command = UIC_CMD_DME_ENABLE;
4047
4048         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4049         if (ret)
4050                 dev_err(hba->dev,
4051                         "dme-enable: error code %d\n", ret);
4052
4053         return ret;
4054 }
4055
4056 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4057 {
4058         #define MIN_DELAY_BEFORE_DME_CMDS_US    1000
4059         unsigned long min_sleep_time_us;
4060
4061         if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4062                 return;
4063
4064         /*
4065          * last_dme_cmd_tstamp will be 0 only for the first call to
4066          * this function.
4067          */
4068         if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4069                 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4070         } else {
4071                 unsigned long delta =
4072                         (unsigned long) ktime_to_us(
4073                                 ktime_sub(ktime_get(),
4074                                 hba->last_dme_cmd_tstamp));
4075
4076                 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4077                         min_sleep_time_us =
4078                                 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4079                 else
4080                         return; /* no more delay required */
4081         }
4082
4083         /* allow sleep for extra 50us if needed */
4084         usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4085 }
4086
4087 /**
4088  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4089  * @hba: per adapter instance
4090  * @attr_sel: uic command argument1
4091  * @attr_set: attribute set type as uic command argument2
4092  * @mib_val: setting value as uic command argument3
4093  * @peer: indicate whether peer or local
4094  *
4095  * Return: 0 on success, non-zero value on failure.
4096  */
4097 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4098                         u8 attr_set, u32 mib_val, u8 peer)
4099 {
4100         struct uic_command uic_cmd = {0};
4101         static const char *const action[] = {
4102                 "dme-set",
4103                 "dme-peer-set"
4104         };
4105         const char *set = action[!!peer];
4106         int ret;
4107         int retries = UFS_UIC_COMMAND_RETRIES;
4108
4109         uic_cmd.command = peer ?
4110                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
4111         uic_cmd.argument1 = attr_sel;
4112         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
4113         uic_cmd.argument3 = mib_val;
4114
4115         do {
4116                 /* for peer attributes we retry upon failure */
4117                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4118                 if (ret)
4119                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4120                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4121         } while (ret && peer && --retries);
4122
4123         if (ret)
4124                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4125                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4126                         UFS_UIC_COMMAND_RETRIES - retries);
4127
4128         return ret;
4129 }
4130 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4131
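/*
 * Usage sketch (illustrative only): callers normally go through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers rather than calling
 * ufshcd_dme_set_attr() directly, e.g.:
 *
 *	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 */
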
4132 /**
4133  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4134  * @hba: per adapter instance
4135  * @attr_sel: uic command argument1
4136  * @mib_val: the value of the attribute as returned by the UIC command
4137  * @peer: indicate whether peer or local
4138  *
4139  * Return: 0 on success, non-zero value on failure.
4140  */
4141 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4142                         u32 *mib_val, u8 peer)
4143 {
4144         struct uic_command uic_cmd = {0};
4145         static const char *const action[] = {
4146                 "dme-get",
4147                 "dme-peer-get"
4148         };
4149         const char *get = action[!!peer];
4150         int ret;
4151         int retries = UFS_UIC_COMMAND_RETRIES;
4152         struct ufs_pa_layer_attr orig_pwr_info;
4153         struct ufs_pa_layer_attr temp_pwr_info;
4154         bool pwr_mode_change = false;
4155
4156         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4157                 orig_pwr_info = hba->pwr_info;
4158                 temp_pwr_info = orig_pwr_info;
4159
4160                 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4161                     orig_pwr_info.pwr_rx == FAST_MODE) {
4162                         temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4163                         temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4164                         pwr_mode_change = true;
4165                 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4166                     orig_pwr_info.pwr_rx == SLOW_MODE) {
4167                         temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4168                         temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4169                         pwr_mode_change = true;
4170                 }
4171                 if (pwr_mode_change) {
4172                         ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4173                         if (ret)
4174                                 goto out;
4175                 }
4176         }
4177
4178         uic_cmd.command = peer ?
4179                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
4180         uic_cmd.argument1 = attr_sel;
4181
4182         do {
4183                 /* for peer attributes we retry upon failure */
4184                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4185                 if (ret)
4186                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4187                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
4188         } while (ret && peer && --retries);
4189
4190         if (ret)
4191                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4192                         get, UIC_GET_ATTR_ID(attr_sel),
4193                         UFS_UIC_COMMAND_RETRIES - retries);
4194
4195         if (mib_val && !ret)
4196                 *mib_val = uic_cmd.argument3;
4197
4198         if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4199             && pwr_mode_change)
4200                 ufshcd_change_power_mode(hba, &orig_pwr_info);
4201 out:
4202         return ret;
4203 }
4204 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4205
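/*
 * Usage sketch (illustrative only): the ufshcd_dme_get()/ufshcd_dme_peer_get()
 * wrappers are the usual way to read an attribute, e.g. the peer's maximum
 * RX HS gear:
 *
 *	u32 gear = 0;
 *
 *	err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear);
 */
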
4206 /**
4207  * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
4208  * state and wait for it to take effect.
4209  *
4210  * @hba: per adapter instance
4211  * @cmd: UIC command to execute
4212  *
4213  * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
4214  * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4215  * device UniPro links, and hence their final completion is indicated by
4216  * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
4217  * in addition to the normal UIC command completion status (UCCS). This
4218  * function only returns after the relevant status bits indicate completion.
4219  *
4220  * Return: 0 on success, non-zero value on failure.
4221  */
4222 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4223 {
4224         DECLARE_COMPLETION_ONSTACK(uic_async_done);
4225         unsigned long flags;
4226         u8 status;
4227         int ret;
4228         bool reenable_intr = false;
4229
4230         mutex_lock(&hba->uic_cmd_mutex);
4231         ufshcd_add_delay_before_dme_cmd(hba);
4232
4233         spin_lock_irqsave(hba->host->host_lock, flags);
4234         if (ufshcd_is_link_broken(hba)) {
4235                 ret = -ENOLINK;
4236                 goto out_unlock;
4237         }
4238         hba->uic_async_done = &uic_async_done;
4239         if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4240                 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4241                 /*
4242                  * Make sure UIC command completion interrupt is disabled before
4243                  * issuing UIC command.
4244                  */
4245                 wmb();
4246                 reenable_intr = true;
4247         }
4248         spin_unlock_irqrestore(hba->host->host_lock, flags);
4249         ret = __ufshcd_send_uic_cmd(hba, cmd, false);
4250         if (ret) {
4251                 dev_err(hba->dev,
4252                         "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4253                         cmd->command, cmd->argument3, ret);
4254                 goto out;
4255         }
4256
4257         if (!wait_for_completion_timeout(hba->uic_async_done,
4258                                          msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
4259                 dev_err(hba->dev,
4260                         "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4261                         cmd->command, cmd->argument3);
4262
4263                 if (!cmd->cmd_active) {
4264                         dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4265                                 __func__);
4266                         goto check_upmcrs;
4267                 }
4268
4269                 ret = -ETIMEDOUT;
4270                 goto out;
4271         }
4272
4273 check_upmcrs:
4274         status = ufshcd_get_upmcrs(hba);
4275         if (status != PWR_LOCAL) {
4276                 dev_err(hba->dev,
4277                         "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4278                         cmd->command, status);
4279                 ret = (status != PWR_OK) ? status : -1;
4280         }
4281 out:
4282         if (ret) {
4283                 ufshcd_print_host_state(hba);
4284                 ufshcd_print_pwr_info(hba);
4285                 ufshcd_print_evt_hist(hba);
4286         }
4287
4288         spin_lock_irqsave(hba->host->host_lock, flags);
4289         hba->active_uic_cmd = NULL;
4290         hba->uic_async_done = NULL;
4291         if (reenable_intr)
4292                 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4293         if (ret) {
4294                 ufshcd_set_link_broken(hba);
4295                 ufshcd_schedule_eh_work(hba);
4296         }
4297 out_unlock:
4298         spin_unlock_irqrestore(hba->host->host_lock, flags);
4299         mutex_unlock(&hba->uic_cmd_mutex);
4300
4301         return ret;
4302 }
4303
4304 /**
4305  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4306  *                              using DME_SET primitives.
4307  * @hba: per adapter instance
4308  * @mode: power mode value
4309  *
4310  * Return: 0 on success, non-zero value on failure.
4311  */
4312 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4313 {
4314         struct uic_command uic_cmd = {0};
4315         int ret;
4316
4317         if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4318                 ret = ufshcd_dme_set(hba,
4319                                 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4320                 if (ret) {
4321                         dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4322                                                 __func__, ret);
4323                         goto out;
4324                 }
4325         }
4326
4327         uic_cmd.command = UIC_CMD_DME_SET;
4328         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
4329         uic_cmd.argument3 = mode;
4330         ufshcd_hold(hba);
4331         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4332         ufshcd_release(hba);
4333
4334 out:
4335         return ret;
4336 }
4337 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4338
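/*
 * Illustrative sketch, not part of the driver: the @mode byte passed to
 * ufshcd_uic_change_pwr_mode() carries the RX power mode in the upper nibble
 * and the TX power mode in the lower nibble, exactly as
 * ufshcd_change_power_mode() below composes it (pwr_rx << 4 | pwr_tx). The
 * example function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_enter_fast_mode(struct ufs_hba *hba)
{
	u8 mode = (FAST_MODE << 4) | FAST_MODE;	/* RX fast, TX fast */

	return ufshcd_uic_change_pwr_mode(hba, mode);
}
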
4339 int ufshcd_link_recovery(struct ufs_hba *hba)
4340 {
4341         int ret;
4342         unsigned long flags;
4343
4344         spin_lock_irqsave(hba->host->host_lock, flags);
4345         hba->ufshcd_state = UFSHCD_STATE_RESET;
4346         ufshcd_set_eh_in_progress(hba);
4347         spin_unlock_irqrestore(hba->host->host_lock, flags);
4348
4349         /* Reset the attached device */
4350         ufshcd_device_reset(hba);
4351
4352         ret = ufshcd_host_reset_and_restore(hba);
4353
4354         spin_lock_irqsave(hba->host->host_lock, flags);
4355         if (ret)
4356                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4357         ufshcd_clear_eh_in_progress(hba);
4358         spin_unlock_irqrestore(hba->host->host_lock, flags);
4359
4360         if (ret)
4361                 dev_err(hba->dev, "%s: link recovery failed, err %d\n",
4362                         __func__, ret);
4363
4364         return ret;
4365 }
4366 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4367
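/*
 * Illustrative sketch, not part of the driver: vendor code that detects an
 * unrecoverable link problem (for example via a platform-specific interrupt)
 * can fall back to the exported full recovery helper above. The example
 * function name is hypothetical.
 */
static void __maybe_unused ufshcd_example_recover_link(struct ufs_hba *hba)
{
	if (ufshcd_link_recovery(hba))
		dev_warn(hba->dev, "example: link recovery failed\n");
}
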
4368 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4369 {
4370         int ret;
4371         struct uic_command uic_cmd = {0};
4372         ktime_t start = ktime_get();
4373
4374         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4375
4376         uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
4377         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4378         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
4379                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4380
4381         if (ret)
4382                 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4383                         __func__, ret);
4384         else
4385                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4386                                                                 POST_CHANGE);
4387
4388         return ret;
4389 }
4390 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4391
4392 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4393 {
4394         struct uic_command uic_cmd = {0};
4395         int ret;
4396         ktime_t start = ktime_get();
4397
4398         ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4399
4400         uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
4401         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4402         trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
4403                              ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4404
4405         if (ret) {
4406                 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4407                         __func__, ret);
4408         } else {
4409                 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4410                                                                 POST_CHANGE);
4411                 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4412                 hba->ufs_stats.hibern8_exit_cnt++;
4413         }
4414
4415         return ret;
4416 }
4417 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4418
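/*
 * Illustrative sketch, not part of the driver: code that needs the link idle
 * while it reconfigures clocks or the PHY can bracket that work with the two
 * exported hibern8 helpers above. The example function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_hibern8_cycle(struct ufs_hba *hba)
{
	int ret;

	ret = ufshcd_uic_hibern8_enter(hba);
	if (ret)
		return ret;

	/* vendor-specific reconfiguration would go here */

	return ufshcd_uic_hibern8_exit(hba);
}
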
4419 static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
4420 {
4421         if (!ufshcd_is_auto_hibern8_supported(hba))
4422                 return;
4423
4424         ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4425 }
4426
4427 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4428 {
4429         const u32 cur_ahit = READ_ONCE(hba->ahit);
4430
4431         if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
4432                 return;
4433
4434         WRITE_ONCE(hba->ahit, ahit);
4435         if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4436                 ufshcd_rpm_get_sync(hba);
4437                 ufshcd_hold(hba);
4438                 ufshcd_configure_auto_hibern8(hba);
4439                 ufshcd_release(hba);
4440                 ufshcd_rpm_put_sync(hba);
4441         }
4442 }
4443 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4444
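/*
 * Illustrative sketch, not part of the driver: a caller that wants, say, a
 * 150 us auto-hibernate idle timer builds an AHIT register value and hands it
 * to ufshcd_auto_hibern8_update(). This assumes the ufshcd_us_to_ahit()
 * helper from <ufs/ufshcd.h>; if it is not available, the value can be
 * composed from the AHIT timer and scale fields directly. The example
 * function name is hypothetical.
 */
static void __maybe_unused ufshcd_example_set_ahit(struct ufs_hba *hba)
{
	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(150));
}
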
4445 /**
4446  * ufshcd_init_pwr_info - set the POR (power on reset)
4447  * values in hba power info
4448  * @hba: per-adapter instance
4449  */
4450 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4451 {
4452         hba->pwr_info.gear_rx = UFS_PWM_G1;
4453         hba->pwr_info.gear_tx = UFS_PWM_G1;
4454         hba->pwr_info.lane_rx = UFS_LANE_1;
4455         hba->pwr_info.lane_tx = UFS_LANE_1;
4456         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4457         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4458         hba->pwr_info.hs_rate = 0;
4459 }
4460
4461 /**
4462  * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4463  * @hba: per-adapter instance
4464  *
4465  * Return: 0 upon success; < 0 upon failure.
4466  */
4467 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4468 {
4469         struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4470
4471         if (hba->max_pwr_info.is_valid)
4472                 return 0;
4473
4474         if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4475                 pwr_info->pwr_tx = FASTAUTO_MODE;
4476                 pwr_info->pwr_rx = FASTAUTO_MODE;
4477         } else {
4478                 pwr_info->pwr_tx = FAST_MODE;
4479                 pwr_info->pwr_rx = FAST_MODE;
4480         }
4481         pwr_info->hs_rate = PA_HS_MODE_B;
4482
4483         /* Get the connected lane count */
4484         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4485                         &pwr_info->lane_rx);
4486         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4487                         &pwr_info->lane_tx);
4488
4489         if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4490                 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4491                                 __func__,
4492                                 pwr_info->lane_rx,
4493                                 pwr_info->lane_tx);
4494                 return -EINVAL;
4495         }
4496
4497         /*
4498          * First, get the maximum HS gear. If it is zero, there is
4499          * no HS gear capability; in that case fall back to the
4500          * maximum PWM gear.
4501          */
4502         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4503         if (!pwr_info->gear_rx) {
4504                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4505                                 &pwr_info->gear_rx);
4506                 if (!pwr_info->gear_rx) {
4507                         dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4508                                 __func__, pwr_info->gear_rx);
4509                         return -EINVAL;
4510                 }
4511                 pwr_info->pwr_rx = SLOW_MODE;
4512         }
4513
4514         ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4515                         &pwr_info->gear_tx);
4516         if (!pwr_info->gear_tx) {
4517                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4518                                 &pwr_info->gear_tx);
4519                 if (!pwr_info->gear_tx) {
4520                         dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4521                                 __func__, pwr_info->gear_tx);
4522                         return -EINVAL;
4523                 }
4524                 pwr_info->pwr_tx = SLOW_MODE;
4525         }
4526
4527         hba->max_pwr_info.is_valid = true;
4528         return 0;
4529 }
4530
4531 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4532                              struct ufs_pa_layer_attr *pwr_mode)
4533 {
4534         int ret;
4535
4536         /* if already configured to the requested pwr_mode */
4537         if (!hba->force_pmc &&
4538             pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4539             pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4540             pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4541             pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4542             pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4543             pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4544             pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4545                 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4546                 return 0;
4547         }
4548
4549         /*
4550          * Configure the following attributes for the power mode change:
4551          * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4552          * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4553          * - PA_HSSERIES
4554          */
4555         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4556         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4557                         pwr_mode->lane_rx);
4558         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4559                         pwr_mode->pwr_rx == FAST_MODE)
4560                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4561         else
4562                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4563
4564         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4565         ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4566                         pwr_mode->lane_tx);
4567         if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4568                         pwr_mode->pwr_tx == FAST_MODE)
4569                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4570         else
4571                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4572
4573         if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4574             pwr_mode->pwr_tx == FASTAUTO_MODE ||
4575             pwr_mode->pwr_rx == FAST_MODE ||
4576             pwr_mode->pwr_tx == FAST_MODE)
4577                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4578                                                 pwr_mode->hs_rate);
4579
4580         if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4581                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4582                                 DL_FC0ProtectionTimeOutVal_Default);
4583                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4584                                 DL_TC0ReplayTimeOutVal_Default);
4585                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4586                                 DL_AFC0ReqTimeOutVal_Default);
4587                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4588                                 DL_FC1ProtectionTimeOutVal_Default);
4589                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4590                                 DL_TC1ReplayTimeOutVal_Default);
4591                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4592                                 DL_AFC1ReqTimeOutVal_Default);
4593
4594                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4595                                 DL_FC0ProtectionTimeOutVal_Default);
4596                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4597                                 DL_TC0ReplayTimeOutVal_Default);
4598                 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4599                                 DL_AFC0ReqTimeOutVal_Default);
4600         }
4601
4602         ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4603                         | pwr_mode->pwr_tx);
4604
4605         if (ret) {
4606                 dev_err(hba->dev,
4607                         "%s: power mode change failed %d\n", __func__, ret);
4608         } else {
4609                 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4610                                                                 pwr_mode);
4611
4612                 memcpy(&hba->pwr_info, pwr_mode,
4613                         sizeof(struct ufs_pa_layer_attr));
4614         }
4615
4616         return ret;
4617 }
4618
4619 /**
4620  * ufshcd_config_pwr_mode - configure a new power mode
4621  * @hba: per-adapter instance
4622  * @desired_pwr_mode: desired power configuration
4623  *
4624  * Return: 0 upon success; < 0 upon failure.
4625  */
4626 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4627                 struct ufs_pa_layer_attr *desired_pwr_mode)
4628 {
4629         struct ufs_pa_layer_attr final_params = { 0 };
4630         int ret;
4631
4632         ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4633                                         desired_pwr_mode, &final_params);
4634
4635         if (ret)
4636                 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4637
4638         ret = ufshcd_change_power_mode(hba, &final_params);
4639
4640         return ret;
4641 }
4642 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4643
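/*
 * Illustrative sketch, not part of the driver: once ufshcd_get_max_pwr_mode()
 * has filled hba->max_pwr_info, a caller can request that configuration (or a
 * scaled-down copy of it) through ufshcd_config_pwr_mode(); the vendor
 * PRE_CHANGE notifier may still adjust the final parameters. The example
 * function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_request_max_pwr(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr desired = hba->max_pwr_info.info;

	return ufshcd_config_pwr_mode(hba, &desired);
}
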
4644 /**
4645  * ufshcd_complete_dev_init() - checks device readiness
4646  * @hba: per-adapter instance
4647  *
4648  * Set fDeviceInit flag and poll until device toggles it.
4649  *
4650  * Return: 0 upon success; < 0 upon failure.
4651  */
4652 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4653 {
4654         int err;
4655         bool flag_res = true;
4656         ktime_t timeout;
4657
4658         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4659                 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4660         if (err) {
4661                 dev_err(hba->dev,
4662                         "%s: setting fDeviceInit flag failed with error %d\n",
4663                         __func__, err);
4664                 goto out;
4665         }
4666
4667         /* Poll fDeviceInit flag to be cleared */
4668         timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4669         do {
4670                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4671                                         QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4672                 if (!flag_res)
4673                         break;
4674                 usleep_range(500, 1000);
4675         } while (ktime_before(ktime_get(), timeout));
4676
4677         if (err) {
4678                 dev_err(hba->dev,
4679                                 "%s: reading fDeviceInit flag failed with error %d\n",
4680                                 __func__, err);
4681         } else if (flag_res) {
4682                 dev_err(hba->dev,
4683                                 "%s: fDeviceInit was not cleared by the device\n",
4684                                 __func__);
4685                 err = -EBUSY;
4686         }
4687 out:
4688         return err;
4689 }
4690
4691 /**
4692  * ufshcd_make_hba_operational - Make UFS controller operational
4693  * @hba: per adapter instance
4694  *
4695  * To bring the UFS host controller to an operational state:
4696  * 1. Enable required interrupts
4697  * 2. Configure interrupt aggregation
4698  * 3. Program the UTRL and UTMRL base addresses
4699  * 4. Configure the run-stop registers
4700  *
4701  * Return: 0 on success, non-zero value on failure.
4702  */
4703 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4704 {
4705         int err = 0;
4706         u32 reg;
4707
4708         /* Enable required interrupts */
4709         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4710
4711         /* Configure interrupt aggregation */
4712         if (ufshcd_is_intr_aggr_allowed(hba))
4713                 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4714         else
4715                 ufshcd_disable_intr_aggr(hba);
4716
4717         /* Configure UTRL and UTMRL base address registers */
4718         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4719                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4720         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4721                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4722         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4723                         REG_UTP_TASK_REQ_LIST_BASE_L);
4724         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4725                         REG_UTP_TASK_REQ_LIST_BASE_H);
4726
4727         /*
4728          * Make sure base address and interrupt setup are updated before
4729          * enabling the run/stop registers below.
4730          */
4731         wmb();
4732
4733         /*
4734          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4735          */
4736         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4737         if (!(ufshcd_get_lists_status(reg))) {
4738                 ufshcd_enable_run_stop_reg(hba);
4739         } else {
4740                 dev_err(hba->dev,
4741                         "Host controller not ready to process requests");
4742                 err = -EIO;
4743         }
4744
4745         return err;
4746 }
4747 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4748
4749 /**
4750  * ufshcd_hba_stop - Send controller to reset state
4751  * @hba: per adapter instance
4752  */
4753 void ufshcd_hba_stop(struct ufs_hba *hba)
4754 {
4755         unsigned long flags;
4756         int err;
4757
4758         /*
4759          * Obtain the host lock to prevent the controller from being disabled
4760          * while the UFS interrupt handler is active on another CPU.
4761          */
4762         spin_lock_irqsave(hba->host->host_lock, flags);
4763         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
4764         spin_unlock_irqrestore(hba->host->host_lock, flags);
4765
4766         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4767                                         CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4768                                         10, 1);
4769         if (err)
4770                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4771 }
4772 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4773
4774 /**
4775  * ufshcd_hba_execute_hce - initialize the controller
4776  * @hba: per adapter instance
4777  *
4778  * The controller resets itself and the controller firmware initialization
4779  * sequence kicks off. When the controller is ready it sets
4780  * the Host Controller Enable bit to 1.
4781  *
4782  * Return: 0 on success, non-zero value on failure.
4783  */
4784 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4785 {
4786         int retry_outer = 3;
4787         int retry_inner;
4788
4789 start:
4790         if (ufshcd_is_hba_active(hba))
4791                 /* change controller state to "reset state" */
4792                 ufshcd_hba_stop(hba);
4793
4794         /* UniPro link is disabled at this point */
4795         ufshcd_set_link_off(hba);
4796
4797         ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4798
4799         /* start controller initialization sequence */
4800         ufshcd_hba_start(hba);
4801
4802         /*
4803          * To initialize a UFS host controller, the HCE bit must be set to 1.
4804          * During initialization the HCE bit value changes from 1->0->1.
4805          * When the host controller completes the initialization sequence,
4806          * it sets the HCE bit back to 1. The same HCE bit is read back
4807          * to check whether the controller has completed the initialization
4808          * sequence. Without this delay, the HCE = 1 written by the previous
4809          * instruction might be read back too early.
4810          * This delay can be changed based on the controller.
4811          */
4812         ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4813
4814         /* wait for the host controller to complete initialization */
4815         retry_inner = 50;
4816         while (!ufshcd_is_hba_active(hba)) {
4817                 if (retry_inner) {
4818                         retry_inner--;
4819                 } else {
4820                         dev_err(hba->dev,
4821                                 "Controller enable failed\n");
4822                         if (retry_outer) {
4823                                 retry_outer--;
4824                                 goto start;
4825                         }
4826                         return -EIO;
4827                 }
4828                 usleep_range(1000, 1100);
4829         }
4830
4831         /* enable UIC related interrupts */
4832         ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4833
4834         ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4835
4836         return 0;
4837 }
4838
4839 int ufshcd_hba_enable(struct ufs_hba *hba)
4840 {
4841         int ret;
4842
4843         if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4844                 ufshcd_set_link_off(hba);
4845                 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4846
4847                 /* enable UIC related interrupts */
4848                 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4849                 ret = ufshcd_dme_reset(hba);
4850                 if (ret) {
4851                         dev_err(hba->dev, "DME_RESET failed\n");
4852                         return ret;
4853                 }
4854
4855                 ret = ufshcd_dme_enable(hba);
4856                 if (ret) {
4857                         dev_err(hba->dev, "Enabling DME failed\n");
4858                         return ret;
4859                 }
4860
4861                 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4862         } else {
4863                 ret = ufshcd_hba_execute_hce(hba);
4864         }
4865
4866         return ret;
4867 }
4868 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4869
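/*
 * Illustrative sketch, not part of the driver: host reset paths typically
 * stop the controller, re-enable it and then make it operational again, in
 * that order, using the exported helpers above (link startup happens in
 * between in the real reset path). The example function name is hypothetical.
 */
static int __maybe_unused ufshcd_example_reinit_host(struct ufs_hba *hba)
{
	int ret;

	ufshcd_hba_stop(hba);			/* back to reset state */

	ret = ufshcd_hba_enable(hba);		/* HCE or DME_RESET/ENABLE */
	if (ret)
		return ret;

	/* link startup would normally be performed here */

	return ufshcd_make_hba_operational(hba);
}
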
4870 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4871 {
4872         int tx_lanes = 0, i, err = 0;
4873
4874         if (!peer)
4875                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4876                                &tx_lanes);
4877         else
4878                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4879                                     &tx_lanes);
4880         for (i = 0; i < tx_lanes; i++) {
4881                 if (!peer)
4882                         err = ufshcd_dme_set(hba,
4883                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4884                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4885                                         0);
4886                 else
4887                         err = ufshcd_dme_peer_set(hba,
4888                                 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4889                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4890                                         0);
4891                 if (err) {
4892                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4893                                 __func__, peer, i, err);
4894                         break;
4895                 }
4896         }
4897
4898         return err;
4899 }
4900
4901 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4902 {
4903         return ufshcd_disable_tx_lcc(hba, true);
4904 }
4905
4906 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4907 {
4908         struct ufs_event_hist *e;
4909
4910         if (id >= UFS_EVT_CNT)
4911                 return;
4912
4913         e = &hba->ufs_stats.event[id];
4914         e->val[e->pos] = val;
4915         e->tstamp[e->pos] = local_clock();
4916         e->cnt += 1;
4917         e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4918
4919         ufshcd_vops_event_notify(hba, id, &val);
4920 }
4921 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4922
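/*
 * Illustrative sketch, not part of the driver: callers record a value into
 * the per-event ring buffer and notify the vendor hook in one call. The event
 * id must be one of the existing UFS_EVT_* ids; the value is whatever helps
 * later debugging (an error register, a return code, ...). The example
 * function name is hypothetical.
 */
static void __maybe_unused ufshcd_example_log_link_fail(struct ufs_hba *hba,
							int err)
{
	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)err);
}
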
4923 /**
4924  * ufshcd_link_startup - Initialize unipro link startup
4925  * @hba: per adapter instance
4926  *
4927  * Return: 0 for success, non-zero in case of failure.
4928  */
4929 static int ufshcd_link_startup(struct ufs_hba *hba)
4930 {
4931         int ret;
4932         int retries = DME_LINKSTARTUP_RETRIES;
4933         bool link_startup_again = false;
4934
4935         /*
4936          * If the UFS device isn't active, link startup has to be issued
4937          * twice to make sure the device state moves to active.
4938          */
4939         if (!ufshcd_is_ufs_dev_active(hba))
4940                 link_startup_again = true;
4941
4942 link_startup:
4943         do {
4944                 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4945
4946                 ret = ufshcd_dme_link_startup(hba);
4947
4948                 /* check if device is detected by inter-connect layer */
4949                 if (!ret && !ufshcd_is_device_present(hba)) {
4950                         ufshcd_update_evt_hist(hba,
4951                                                UFS_EVT_LINK_STARTUP_FAIL,
4952                                                0);
4953                         dev_err(hba->dev, "%s: Device not present\n", __func__);
4954                         ret = -ENXIO;
4955                         goto out;
4956                 }
4957
4958                 /*
4959                  * DME link lost indication is only received when link is up,
4960                  * but we can't be sure if the link is up until link startup
4961                  * succeeds. So reset the local UniPro and try again.
4962                  */
4963                 if (ret && retries && ufshcd_hba_enable(hba)) {
4964                         ufshcd_update_evt_hist(hba,
4965                                                UFS_EVT_LINK_STARTUP_FAIL,
4966                                                (u32)ret);
4967                         goto out;
4968                 }
4969         } while (ret && retries--);
4970
4971         if (ret) {
4972                 /* failed to get the link up... give up */
4973                 ufshcd_update_evt_hist(hba,
4974                                        UFS_EVT_LINK_STARTUP_FAIL,
4975                                        (u32)ret);
4976                 goto out;
4977         }
4978
4979         if (link_startup_again) {
4980                 link_startup_again = false;
4981                 retries = DME_LINKSTARTUP_RETRIES;
4982                 goto link_startup;
4983         }
4984
4985         /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4986         ufshcd_init_pwr_info(hba);
4987         ufshcd_print_pwr_info(hba);
4988
4989         if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4990                 ret = ufshcd_disable_device_tx_lcc(hba);
4991                 if (ret)
4992                         goto out;
4993         }
4994
4995         /* Include any host controller configuration via UIC commands */
4996         ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4997         if (ret)
4998                 goto out;
4999
5000         /* Clear UECPA once, in case a LINERESET occurred during LINK_STARTUP */
5001         ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5002         ret = ufshcd_make_hba_operational(hba);
5003 out:
5004         if (ret) {
5005                 dev_err(hba->dev, "link startup failed %d\n", ret);
5006                 ufshcd_print_host_state(hba);
5007                 ufshcd_print_pwr_info(hba);
5008                 ufshcd_print_evt_hist(hba);
5009         }
5010         return ret;
5011 }
5012
5013 /**
5014  * ufshcd_verify_dev_init() - Verify device initialization
5015  * @hba: per-adapter instance
5016  *
5017  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5018  * device's UFS Transport Protocol (UTP) layer is ready after a reset.
5019  * If the UTP layer at the device side is not initialized, it may
5020  * not respond with a NOP IN UPIU within the %NOP_OUT_TIMEOUT timeout,
5021  * in which case NOP OUT is retried for up to %NOP_OUT_RETRIES iterations.
5022  *
5023  * Return: 0 upon success; < 0 upon failure.
5024  */
5025 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5026 {
5027         int err = 0;
5028         int retries;
5029
5030         ufshcd_hold(hba);
5031         mutex_lock(&hba->dev_cmd.lock);
5032         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5033                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5034                                           hba->nop_out_timeout);
5035
5036                 if (!err || err == -ETIMEDOUT)
5037                         break;
5038
5039                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5040         }
5041         mutex_unlock(&hba->dev_cmd.lock);
5042         ufshcd_release(hba);
5043
5044         if (err)
5045                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5046         return err;
5047 }
5048
5049 /**
5050  * ufshcd_setup_links - associate links between the device wlun and other luns
5051  * @sdev: pointer to SCSI device
5052  * @hba: pointer to ufs hba
5053  */
5054 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5055 {
5056         struct device_link *link;
5057
5058         /*
5059          * The device wlun is the supplier and the rest of the luns are consumers.
5060          * This ensures that device wlun suspends after all other luns.
5061          */
5062         if (hba->ufs_device_wlun) {
5063                 link = device_link_add(&sdev->sdev_gendev,
5064                                        &hba->ufs_device_wlun->sdev_gendev,
5065                                        DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5066                 if (!link) {
5067                         dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5068                                 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5069                         return;
5070                 }
5071                 hba->luns_avail--;
5072                 /* Ignore REPORT_LUN wlun probing */
5073                 if (hba->luns_avail == 1) {
5074                         ufshcd_rpm_put(hba);
5075                         return;
5076                 }
5077         } else {
5078                 /*
5079                  * Device wlun is probed. The assumption is that WLUNs are
5080                  * scanned before other LUNs.
5081                  */
5082                 hba->luns_avail--;
5083         }
5084 }
5085
5086 /**
5087  * ufshcd_lu_init - Initialize the relevant parameters of the LU
5088  * @hba: per-adapter instance
5089  * @sdev: pointer to SCSI device
5090  */
5091 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5092 {
5093         int len = QUERY_DESC_MAX_SIZE;
5094         u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5095         u8 lun_qdepth = hba->nutrs;
5096         u8 *desc_buf;
5097         int ret;
5098
5099         desc_buf = kzalloc(len, GFP_KERNEL);
5100         if (!desc_buf)
5101                 goto set_qdepth;
5102
5103         ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5104         if (ret < 0) {
5105                 if (ret == -EOPNOTSUPP)
5106                         /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5107                         lun_qdepth = 1;
5108                 kfree(desc_buf);
5109                 goto set_qdepth;
5110         }
5111
5112         if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5113                 /*
5114                  * In a per-LU queueing architecture bLUQueueDepth is non-zero, so
5115                  * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth.
5116                  */
5117                 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5118         }
5119         /*
5120          * According to the UFS device specification, the write protection mode is
5121          * only supported by normal LUs, not by WLUNs.
5122          */
5123         if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5124             !hba->dev_info.is_lu_power_on_wp &&
5125             desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5126                 hba->dev_info.is_lu_power_on_wp = true;
5127
5128         /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5129         if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5130             desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5131                 hba->dev_info.b_advanced_rpmb_en = true;
5132
5133
5134         kfree(desc_buf);
5135 set_qdepth:
5136         /*
5137          * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
5138          * bLUQueueDepth == 0, the queue depth is set to the maximum value that the host can queue.
5139          */
5140         dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5141         scsi_change_queue_depth(sdev, lun_qdepth);
5142 }
5143
5144 /**
5145  * ufshcd_slave_alloc - handle initial SCSI device configurations
5146  * @sdev: pointer to SCSI device
5147  *
5148  * Return: success.
5149  */
5150 static int ufshcd_slave_alloc(struct scsi_device *sdev)
5151 {
5152         struct ufs_hba *hba;
5153
5154         hba = shost_priv(sdev->host);
5155
5156         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5157         sdev->use_10_for_ms = 1;
5158
5159         /* DBD field should be set to 1 in mode sense(10) */
5160         sdev->set_dbd_for_ms = 1;
5161
5162         /* allow SCSI layer to restart the device in case of errors */
5163         sdev->allow_restart = 1;
5164
5165         /* REPORT SUPPORTED OPERATION CODES is not supported */
5166         sdev->no_report_opcodes = 1;
5167
5168         /* WRITE_SAME command is not supported */
5169         sdev->no_write_same = 1;
5170
5171         ufshcd_lu_init(hba, sdev);
5172
5173         ufshcd_setup_links(hba, sdev);
5174
5175         return 0;
5176 }
5177
5178 /**
5179  * ufshcd_change_queue_depth - change queue depth
5180  * @sdev: pointer to SCSI device
5181  * @depth: required depth to set
5182  *
5183  * Change queue depth and make sure the max. limits are not crossed.
5184  *
5185  * Return: new queue depth.
5186  */
5187 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5188 {
5189         return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5190 }
5191
5192 /**
5193  * ufshcd_slave_configure - adjust SCSI device configurations
5194  * @sdev: pointer to SCSI device
5195  *
5196  * Return: 0 (success).
5197  */
5198 static int ufshcd_slave_configure(struct scsi_device *sdev)
5199 {
5200         struct ufs_hba *hba = shost_priv(sdev->host);
5201         struct request_queue *q = sdev->request_queue;
5202
5203         blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
5204
5205         /*
5206          * Block runtime-pm until all consumers are added.
5207          * Refer to ufshcd_setup_links().
5208          */
5209         if (is_device_wlun(sdev))
5210                 pm_runtime_get_noresume(&sdev->sdev_gendev);
5211         else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5212                 sdev->rpm_autosuspend = 1;
5213         /*
5214          * Do not print messages during runtime PM to avoid a never-ending
5215          * cycle: messages written back to storage by user space cause a
5216          * runtime resume, which causes more messages, and so on.
5217          */
5218         sdev->silence_suspend = 1;
5219
5220         if (hba->vops && hba->vops->config_scsi_dev)
5221                 hba->vops->config_scsi_dev(sdev);
5222
5223         ufshcd_crypto_register(hba, q);
5224
5225         return 0;
5226 }
5227
5228 /**
5229  * ufshcd_slave_destroy - remove SCSI device configurations
5230  * @sdev: pointer to SCSI device
5231  */
5232 static void ufshcd_slave_destroy(struct scsi_device *sdev)
5233 {
5234         struct ufs_hba *hba;
5235         unsigned long flags;
5236
5237         hba = shost_priv(sdev->host);
5238
5239         /* Drop the reference as it won't be needed anymore */
5240         if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5241                 spin_lock_irqsave(hba->host->host_lock, flags);
5242                 hba->ufs_device_wlun = NULL;
5243                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5244         } else if (hba->ufs_device_wlun) {
5245                 struct device *supplier = NULL;
5246
5247                 /* Ensure UFS Device WLUN exists and does not disappear */
5248                 spin_lock_irqsave(hba->host->host_lock, flags);
5249                 if (hba->ufs_device_wlun) {
5250                         supplier = &hba->ufs_device_wlun->sdev_gendev;
5251                         get_device(supplier);
5252                 }
5253                 spin_unlock_irqrestore(hba->host->host_lock, flags);
5254
5255                 if (supplier) {
5256                         /*
5257                          * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5258                          * device will not have been registered but can still
5259                          * have a device link holding a reference to the device.
5260                          */
5261                         device_link_remove(&sdev->sdev_gendev, supplier);
5262                         put_device(supplier);
5263                 }
5264         }
5265 }
5266
5267 /**
5268  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5269  * @lrbp: pointer to local reference block of completed command
5270  * @scsi_status: SCSI command status
5271  *
5272  * Return: value based on SCSI command status.
5273  */
5274 static inline int
5275 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5276 {
5277         int result = 0;
5278
5279         switch (scsi_status) {
5280         case SAM_STAT_CHECK_CONDITION:
5281                 ufshcd_copy_sense_data(lrbp);
5282                 fallthrough;
5283         case SAM_STAT_GOOD:
5284                 result |= DID_OK << 16 | scsi_status;
5285                 break;
5286         case SAM_STAT_TASK_SET_FULL:
5287         case SAM_STAT_BUSY:
5288         case SAM_STAT_TASK_ABORTED:
5289                 ufshcd_copy_sense_data(lrbp);
5290                 result |= scsi_status;
5291                 break;
5292         default:
5293                 result |= DID_ERROR << 16;
5294                 break;
5295         } /* end of switch */
5296
5297         return result;
5298 }
5299
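/*
 * Worked example for the encoding above (illustrative values): for a CHECK
 * CONDITION response, SAM_STAT_CHECK_CONDITION (0x02), the function returns
 * DID_OK << 16 | 0x02, i.e. the host byte reports success while the status
 * byte carries the SCSI status for the midlayer to decode.
 */
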
5300 /**
5301  * ufshcd_transfer_rsp_status - Get overall status of the response
5302  * @hba: per adapter instance
5303  * @lrbp: pointer to local reference block of completed command
5304  * @cqe: pointer to the completion queue entry
5305  *
5306  * Return: result of the command to notify SCSI midlayer.
5307  */
5308 static inline int
5309 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5310                            struct cq_entry *cqe)
5311 {
5312         int result = 0;
5313         int scsi_status;
5314         enum utp_ocs ocs;
5315         u8 upiu_flags;
5316         u32 resid;
5317
5318         upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5319         resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5320         /*
5321          * Test !overflow instead of underflow to support UFS devices that do
5322          * not set either flag.
5323          */
5324         if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5325                 scsi_set_resid(lrbp->cmd, resid);
5326
5327         /* overall command status of utrd */
5328         ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5329
5330         if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5331                 if (lrbp->ucd_rsp_ptr->header.response ||
5332                     lrbp->ucd_rsp_ptr->header.status)
5333                         ocs = OCS_SUCCESS;
5334         }
5335
5336         switch (ocs) {
5337         case OCS_SUCCESS:
5338                 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5339                 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5340                 case UPIU_TRANSACTION_RESPONSE:
5341                         /*
5342                          * get the result based on SCSI status response
5343                          * to notify the SCSI midlayer of the command status
5344                          */
5345                         scsi_status = lrbp->ucd_rsp_ptr->header.status;
5346                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5347
5348                         /*
5349                          * Currently we are only supporting BKOPs exception
5350                          * events hence we can ignore BKOPs exception event
5351                          * during power management callbacks. BKOPs exception
5352                          * event is not expected to be raised in runtime suspend
5353                          * callback as it allows the urgent bkops.
5354                          * During system suspend, we are anyway forcefully
5355                          * disabling the bkops and if urgent bkops is needed
5356                          * it will be enabled on system resume. Long term
5357                          * solution could be to abort the system suspend if
5358                          * UFS device needs urgent BKOPs.
5359                          */
5360                         if (!hba->pm_op_in_progress &&
5361                             !ufshcd_eh_in_progress(hba) &&
5362                             ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5363                                 /* Flushed in suspend */
5364                                 schedule_work(&hba->eeh_work);
5365                         break;
5366                 case UPIU_TRANSACTION_REJECT_UPIU:
5367                         /* TODO: handle Reject UPIU Response */
5368                         result = DID_ERROR << 16;
5369                         dev_err(hba->dev,
5370                                 "Reject UPIU not fully implemented\n");
5371                         break;
5372                 default:
5373                         dev_err(hba->dev,
5374                                 "Unexpected request response code = %x\n",
5375                                 result);
5376                         result = DID_ERROR << 16;
5377                         break;
5378                 }
5379                 break;
5380         case OCS_ABORTED:
5381                 result |= DID_ABORT << 16;
5382                 break;
5383         case OCS_INVALID_COMMAND_STATUS:
5384                 result |= DID_REQUEUE << 16;
5385                 break;
5386         case OCS_INVALID_CMD_TABLE_ATTR:
5387         case OCS_INVALID_PRDT_ATTR:
5388         case OCS_MISMATCH_DATA_BUF_SIZE:
5389         case OCS_MISMATCH_RESP_UPIU_SIZE:
5390         case OCS_PEER_COMM_FAILURE:
5391         case OCS_FATAL_ERROR:
5392         case OCS_DEVICE_FATAL_ERROR:
5393         case OCS_INVALID_CRYPTO_CONFIG:
5394         case OCS_GENERAL_CRYPTO_ERROR:
5395         default:
5396                 result |= DID_ERROR << 16;
5397                 dev_err(hba->dev,
5398                                 "OCS error from controller = %x for tag %d\n",
5399                                 ocs, lrbp->task_tag);
5400                 ufshcd_print_evt_hist(hba);
5401                 ufshcd_print_host_state(hba);
5402                 break;
5403         } /* end of switch */
5404
5405         if ((host_byte(result) != DID_OK) &&
5406             (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5407                 ufshcd_print_tr(hba, lrbp->task_tag, true);
5408         return result;
5409 }
5410
5411 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5412                                          u32 intr_mask)
5413 {
5414         if (!ufshcd_is_auto_hibern8_supported(hba) ||
5415             !ufshcd_is_auto_hibern8_enabled(hba))
5416                 return false;
5417
5418         if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5419                 return false;
5420
5421         if (hba->active_uic_cmd &&
5422             (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5423             hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5424                 return false;
5425
5426         return true;
5427 }
5428
5429 /**
5430  * ufshcd_uic_cmd_compl - handle completion of uic command
5431  * @hba: per adapter instance
5432  * @intr_status: interrupt status generated by the controller
5433  *
5434  * Return:
5435  *  IRQ_HANDLED - If interrupt is valid
5436  *  IRQ_NONE    - If invalid interrupt
5437  */
5438 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5439 {
5440         irqreturn_t retval = IRQ_NONE;
5441
5442         spin_lock(hba->host->host_lock);
5443         if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5444                 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5445
5446         if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
5447                 hba->active_uic_cmd->argument2 |=
5448                         ufshcd_get_uic_cmd_result(hba);
5449                 hba->active_uic_cmd->argument3 =
5450                         ufshcd_get_dme_attr_val(hba);
5451                 if (!hba->uic_async_done)
5452                         hba->active_uic_cmd->cmd_active = 0;
5453                 complete(&hba->active_uic_cmd->done);
5454                 retval = IRQ_HANDLED;
5455         }
5456
5457         if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5458                 hba->active_uic_cmd->cmd_active = 0;
5459                 complete(hba->uic_async_done);
5460                 retval = IRQ_HANDLED;
5461         }
5462
5463         if (retval == IRQ_HANDLED)
5464                 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5465                                              UFS_CMD_COMP);
5466         spin_unlock(hba->host->host_lock);
5467         return retval;
5468 }
5469
5470 /* Release the resources allocated for processing a SCSI command. */
5471 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5472                              struct ufshcd_lrb *lrbp)
5473 {
5474         struct scsi_cmnd *cmd = lrbp->cmd;
5475
5476         scsi_dma_unmap(cmd);
5477         ufshcd_release(hba);
5478         ufshcd_clk_scaling_update_busy(hba);
5479 }
5480
5481 /**
5482  * ufshcd_compl_one_cqe - handle a completion queue entry
5483  * @hba: per adapter instance
5484  * @task_tag: the task tag of the request to be completed
5485  * @cqe: pointer to the completion queue entry
5486  */
5487 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5488                           struct cq_entry *cqe)
5489 {
5490         struct ufshcd_lrb *lrbp;
5491         struct scsi_cmnd *cmd;
5492         enum utp_ocs ocs;
5493
5494         lrbp = &hba->lrb[task_tag];
5495         lrbp->compl_time_stamp = ktime_get();
5496         cmd = lrbp->cmd;
5497         if (cmd) {
5498                 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5499                         ufshcd_update_monitor(hba, lrbp);
5500                 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5501                 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5502                 ufshcd_release_scsi_cmd(hba, lrbp);
5503                 /* Do not touch lrbp after scsi done */
5504                 scsi_done(cmd);
5505         } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
5506                    lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
5507                 if (hba->dev_cmd.complete) {
5508                         if (cqe) {
5509                                 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5510                                 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5511                         }
5512                         complete(hba->dev_cmd.complete);
5513                 }
5514         }
5515 }
5516
5517 /**
5518  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5519  * @hba: per adapter instance
5520  * @completed_reqs: bitmask that indicates which requests to complete
5521  */
5522 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5523                                         unsigned long completed_reqs)
5524 {
5525         int tag;
5526
5527         for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5528                 ufshcd_compl_one_cqe(hba, tag, NULL);
5529 }
5530
5531 /* Any value that is not an existing queue number is fine for this constant. */
5532 enum {
5533         UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5534 };
5535
5536 static void ufshcd_clear_polled(struct ufs_hba *hba,
5537                                 unsigned long *completed_reqs)
5538 {
5539         int tag;
5540
5541         for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5542                 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5543
5544                 if (!cmd)
5545                         continue;
5546                 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5547                         __clear_bit(tag, completed_reqs);
5548         }
5549 }
5550
5551 /*
5552  * Return: > 0 if one or more commands have been completed or 0 if no
5553  * requests have been completed.
5554  */
5555 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5556 {
5557         struct ufs_hba *hba = shost_priv(shost);
5558         unsigned long completed_reqs, flags;
5559         u32 tr_doorbell;
5560         struct ufs_hw_queue *hwq;
5561
5562         if (is_mcq_enabled(hba)) {
5563                 hwq = &hba->uhq[queue_num];
5564
5565                 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5566         }
5567
5568         spin_lock_irqsave(&hba->outstanding_lock, flags);
5569         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5570         completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5571         WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5572                   "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5573                   hba->outstanding_reqs);
5574         if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5575                 /* Do not complete polled requests from interrupt context. */
5576                 ufshcd_clear_polled(hba, &completed_reqs);
5577         }
5578         hba->outstanding_reqs &= ~completed_reqs;
5579         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5580
5581         if (completed_reqs)
5582                 __ufshcd_transfer_req_compl(hba, completed_reqs);
5583
5584         return completed_reqs != 0;
5585 }
5586
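/*
 * Worked example for the doorbell arithmetic above (illustrative values): if
 * hba->outstanding_reqs is 0b1011 (tags 0, 1 and 3 issued) and the doorbell
 * register reads back 0b1000 (tag 3 still owned by the controller), then
 * ~tr_doorbell & outstanding_reqs = 0b0011, i.e. tags 0 and 1 have completed
 * and are passed to __ufshcd_transfer_req_compl().
 */
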
5587 /**
5588  * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
5589  * invoked from the error handler context or ufshcd_host_reset_and_restore()
5590  * to complete the pending transfers and free the resources associated with
5591  * the scsi command.
5592  *
5593  * @hba: per adapter instance
5594  * @force_compl: This flag is set to true when invoked
5595  * from ufshcd_host_reset_and_restore() in which case it requires special
5596  * handling because the host controller has been reset by ufshcd_hba_stop().
5597  */
5598 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5599                                               bool force_compl)
5600 {
5601         struct ufs_hw_queue *hwq;
5602         struct ufshcd_lrb *lrbp;
5603         struct scsi_cmnd *cmd;
5604         unsigned long flags;
5605         u32 hwq_num, utag;
5606         int tag;
5607
5608         for (tag = 0; tag < hba->nutrs; tag++) {
5609                 lrbp = &hba->lrb[tag];
5610                 cmd = lrbp->cmd;
5611                 if (!ufshcd_cmd_inflight(cmd) ||
5612                     test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5613                         continue;
5614
5615                 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
5616                 hwq_num = blk_mq_unique_tag_to_hwq(utag);
5617                 hwq = &hba->uhq[hwq_num];
5618
5619                 if (force_compl) {
5620                         ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5621                         /*
5622                          * For those cmds of which the cqes are not present
5623                          * in the cq, complete them explicitly.
5624                          */
5625                         spin_lock_irqsave(&hwq->cq_lock, flags);
5626                         if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5627                                 set_host_byte(cmd, DID_REQUEUE);
5628                                 ufshcd_release_scsi_cmd(hba, lrbp);
5629                                 scsi_done(cmd);
5630                         }
5631                         spin_unlock_irqrestore(&hwq->cq_lock, flags);
5632                 } else {
5633                         ufshcd_mcq_poll_cqe_lock(hba, hwq);
5634                 }
5635         }
5636 }
5637
5638 /**
5639  * ufshcd_transfer_req_compl - handle SCSI and query command completion
5640  * @hba: per adapter instance
5641  *
5642  * Return:
5643  *  IRQ_HANDLED - If interrupt is valid
5644  *  IRQ_NONE    - If invalid interrupt
5645  */
5646 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5647 {
5648         /* Resetting the interrupt aggregation counters first and reading the
5649          * DOOR_BELL afterward allows us to handle all the completed requests.
5650          * In order to prevent starvation of other interrupts, the DB is read
5651          * once after reset. The downside of this solution is the possibility
5652          * of a false interrupt if the device completes another request after
5653          * resetting aggregation and before reading the DB.
5654          */
5655         if (ufshcd_is_intr_aggr_allowed(hba) &&
5656             !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5657                 ufshcd_reset_intr_aggr(hba);
5658
5659         if (ufs_fail_completion(hba))
5660                 return IRQ_HANDLED;
5661
5662         /*
5663          * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5664          * do not want polling to trigger spurious interrupt complaints.
5665          */
5666         ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5667
5668         return IRQ_HANDLED;
5669 }
5670
5671 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5672 {
5673         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5674                                        QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5675                                        &ee_ctrl_mask);
5676 }
5677
5678 int ufshcd_write_ee_control(struct ufs_hba *hba)
5679 {
5680         int err;
5681
5682         mutex_lock(&hba->ee_ctrl_mutex);
5683         err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5684         mutex_unlock(&hba->ee_ctrl_mutex);
5685         if (err)
5686                 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5687                         __func__, err);
5688         return err;
5689 }
5690
5691 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5692                              const u16 *other_mask, u16 set, u16 clr)
5693 {
5694         u16 new_mask, ee_ctrl_mask;
5695         int err = 0;
5696
5697         mutex_lock(&hba->ee_ctrl_mutex);
5698         new_mask = (*mask & ~clr) | set;
5699         ee_ctrl_mask = new_mask | *other_mask;
5700         if (ee_ctrl_mask != hba->ee_ctrl_mask)
5701                 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5702         /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5703         if (!err) {
5704                 hba->ee_ctrl_mask = ee_ctrl_mask;
5705                 *mask = new_mask;
5706         }
5707         mutex_unlock(&hba->ee_ctrl_mutex);
5708         return err;
5709 }
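
/*
 * Illustrative note (not part of the original driver): with, for example,
 * *mask = 0x05, set = 0x02 and clr = 0x01, the update above computes
 * new_mask = (0x05 & ~0x01) | 0x02 = 0x06, and the value written to the
 * device is new_mask | *other_mask, so the driver-owned and userspace-owned
 * halves of the exception event control mask never clobber each other.
 */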
5710
5711 /**
5712  * ufshcd_disable_ee - disable exception event
5713  * @hba: per-adapter instance
5714  * @mask: exception event to disable
5715  *
5716  * Disables the exception event in the device so that the EVENT_ALERT
5717  * bit is not set.
5718  *
5719  * Return: zero on success, non-zero error value on failure.
5720  */
5721 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5722 {
5723         return ufshcd_update_ee_drv_mask(hba, 0, mask);
5724 }
5725
5726 /**
5727  * ufshcd_enable_ee - enable exception event
5728  * @hba: per-adapter instance
5729  * @mask: exception event to enable
5730  *
5731  * Enable the corresponding exception event in the device to allow the
5732  * device to alert the host in critical scenarios.
5733  *
5734  * Return: zero on success, non-zero error value on failure.
5735  */
5736 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5737 {
5738         return ufshcd_update_ee_drv_mask(hba, mask, 0);
5739 }
5740
5741 /**
5742  * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5743  * @hba: per-adapter instance
5744  *
5745  * Allow the device to manage background operations on its own. Enabling
5746  * this might lead to inconsistent latencies during normal data transfers
5747  * because the device is free to schedule background operations as it
5748  * sees fit.
5749  *
5750  * Return: zero on success, non-zero on failure.
5751  */
5752 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5753 {
5754         int err = 0;
5755
5756         if (hba->auto_bkops_enabled)
5757                 goto out;
5758
5759         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5760                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5761         if (err) {
5762                 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5763                                 __func__, err);
5764                 goto out;
5765         }
5766
5767         hba->auto_bkops_enabled = true;
5768         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5769
5770         /* No need of URGENT_BKOPS exception from the device */
5771         err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5772         if (err)
5773                 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5774                                 __func__, err);
5775 out:
5776         return err;
5777 }
5778
5779 /**
5780  * ufshcd_disable_auto_bkops - prevent the device from doing background ops
5781  * @hba: per-adapter instance
5782  *
5783  * Disabling background operations improves command response latency but
5784  * has the drawback that the device may move into a critical state in
5785  * which it is not operable. Make sure to call ufshcd_enable_auto_bkops()
5786  * whenever the host is idle so that BKOPS are managed effectively without
5787  * any negative impact.
5788  *
5789  * Return: zero on success, non-zero on failure.
5790  */
5791 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5792 {
5793         int err = 0;
5794
5795         if (!hba->auto_bkops_enabled)
5796                 goto out;
5797
5798         /*
5799          * If host-assisted BKOPS is to be used, make sure the
5800          * urgent BKOPS exception is allowed.
5801          */
5802         err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5803         if (err) {
5804                 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5805                                 __func__, err);
5806                 goto out;
5807         }
5808
5809         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5810                         QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5811         if (err) {
5812                 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5813                                 __func__, err);
5814                 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5815                 goto out;
5816         }
5817
5818         hba->auto_bkops_enabled = false;
5819         trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5820         hba->is_urgent_bkops_lvl_checked = false;
5821 out:
5822         return err;
5823 }
5824
5825 /**
5826  * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5827  * @hba: per adapter instance
5828  *
5829  * After a device reset the device may toggle the BKOPS_EN flag back
5830  * to its default value, so the s/w tracking variables must be updated
5831  * as well. This function changes the auto-bkops state based on
5832  * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5833  */
5834 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5835 {
5836         if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5837                 hba->auto_bkops_enabled = false;
5838                 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5839                 ufshcd_enable_auto_bkops(hba);
5840         } else {
5841                 hba->auto_bkops_enabled = true;
5842                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5843                 ufshcd_disable_auto_bkops(hba);
5844         }
5845         hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5846         hba->is_urgent_bkops_lvl_checked = false;
5847 }
5848
5849 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5850 {
5851         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5852                         QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5853 }
5854
5855 /**
5856  * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5857  * @hba: per-adapter instance
5858  * @status: bkops_status value
5859  *
5860  * Read bkops_status from the UFS device and enable the fBackgroundOpsEn
5861  * flag in the device to permit background operations if the device's
5862  * bkops_status is greater than or equal to the "status" argument passed
5863  * to this function; disable it otherwise.
5864  *
5865  * Return: 0 for success, non-zero in case of failure.
5866  *
5867  * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5868  * to know whether auto bkops is enabled or disabled after this function
5869  * returns control to it.
5870  */
5871 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5872                              enum bkops_status status)
5873 {
5874         int err;
5875         u32 curr_status = 0;
5876
5877         err = ufshcd_get_bkops_status(hba, &curr_status);
5878         if (err) {
5879                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5880                                 __func__, err);
5881                 goto out;
5882         } else if (curr_status > BKOPS_STATUS_MAX) {
5883                 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5884                                 __func__, curr_status);
5885                 err = -EINVAL;
5886                 goto out;
5887         }
5888
5889         if (curr_status >= status)
5890                 err = ufshcd_enable_auto_bkops(hba);
5891         else
5892                 err = ufshcd_disable_auto_bkops(hba);
5893 out:
5894         return err;
5895 }
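
/*
 * Illustrative example (not part of the original driver): if the device
 * reports a bkops_status of BKOPS_STATUS_PERF_IMPACT and the caller passes
 * status == BKOPS_STATUS_PERF_IMPACT, then curr_status >= status holds and
 * auto-BKOPS is enabled; had the device reported a status below
 * BKOPS_STATUS_PERF_IMPACT, auto-BKOPS would be disabled instead.
 */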
5896
5897 /**
5898  * ufshcd_urgent_bkops - handle urgent bkops exception event
5899  * @hba: per-adapter instance
5900  *
5901  * Enable fBackgroundOpsEn flag in the device to permit background
5902  * operations.
5903  *
5904  * Whether auto-BKOPS ended up enabled or disabled can be checked via
5905  * hba->auto_bkops_enabled after this function returns.
5906  *
5907  * Return: 0 upon success; < 0 upon failure.
5908  */
5909 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5910 {
5911         return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5912 }
5913
5914 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5915 {
5916         return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5917                         QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5918 }
5919
5920 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5921 {
5922         int err;
5923         u32 curr_status = 0;
5924
5925         if (hba->is_urgent_bkops_lvl_checked)
5926                 goto enable_auto_bkops;
5927
5928         err = ufshcd_get_bkops_status(hba, &curr_status);
5929         if (err) {
5930                 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5931                                 __func__, err);
5932                 goto out;
5933         }
5934
5935         /*
5936          * We are seeing that some devices raise the urgent BKOPS
5937          * exception even when the BKOPS status doesn't indicate performance
5938          * impacted or critical. Handle these devices by determining their
5939          * urgent bkops status at runtime.
5940          */
5941         if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5942                 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5943                                 __func__, curr_status);
5944                 /* update the current status as the urgent bkops level */
5945                 hba->urgent_bkops_lvl = curr_status;
5946                 hba->is_urgent_bkops_lvl_checked = true;
5947         }
5948
5949 enable_auto_bkops:
5950         err = ufshcd_enable_auto_bkops(hba);
5951 out:
5952         if (err < 0)
5953                 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5954                                 __func__, err);
5955 }
5956
5957 static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
5958 {
5959         u32 value;
5960
5961         if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5962                                 QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
5963                 return;
5964
5965         dev_info(hba->dev, "exception Tcase %d\n", value - 80);
5966
5967         ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
5968
5969         /*
5970          * A placeholder for platform vendors to add whatever additional
5971          * steps are required.
5972          */
5973 }
5974
5975 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
5976 {
5977         u8 index;
5978         enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
5979                                    UPIU_QUERY_OPCODE_CLEAR_FLAG;
5980
5981         index = ufshcd_wb_get_query_index(hba);
5982         return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
5983 }
5984
5985 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
5986 {
5987         int ret;
5988
5989         if (!ufshcd_is_wb_allowed(hba) ||
5990             hba->dev_info.wb_enabled == enable)
5991                 return 0;
5992
5993         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
5994         if (ret) {
5995                 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
5996                         __func__, enable ? "enabling" : "disabling", ret);
5997                 return ret;
5998         }
5999
6000         hba->dev_info.wb_enabled = enable;
6001         dev_dbg(hba->dev, "%s: Write Booster %s\n",
6002                         __func__, enable ? "enabled" : "disabled");
6003
6004         return ret;
6005 }
6006
6007 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
6008                                                  bool enable)
6009 {
6010         int ret;
6011
6012         ret = __ufshcd_wb_toggle(hba, enable,
6013                         QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
6014         if (ret) {
6015                 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
6016                         __func__, enable ? "enabling" : "disabling", ret);
6017                 return;
6018         }
6019         dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
6020                         __func__, enable ? "enabled" : "disabled");
6021 }
6022
6023 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
6024 {
6025         int ret;
6026
6027         if (!ufshcd_is_wb_allowed(hba) ||
6028             hba->dev_info.wb_buf_flush_enabled == enable)
6029                 return 0;
6030
6031         ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
6032         if (ret) {
6033                 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
6034                         __func__, enable ? "enabling" : "disabling", ret);
6035                 return ret;
6036         }
6037
6038         hba->dev_info.wb_buf_flush_enabled = enable;
6039         dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
6040                         __func__, enable ? "enabled" : "disabled");
6041
6042         return ret;
6043 }
6044
6045 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
6046                                                 u32 avail_buf)
6047 {
6048         u32 cur_buf;
6049         int ret;
6050         u8 index;
6051
6052         index = ufshcd_wb_get_query_index(hba);
6053         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6054                                               QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6055                                               index, 0, &cur_buf);
6056         if (ret) {
6057                 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6058                         __func__, ret);
6059                 return false;
6060         }
6061
6062         if (!cur_buf) {
6063                 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6064                          cur_buf);
6065                 return false;
6066         }
6067         /* Keep flushing only while the available buffer is below the threshold */
6068         return avail_buf < hba->vps->wb_flush_threshold;
6069 }
6070
6071 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6072 {
6073         if (ufshcd_is_wb_buf_flush_allowed(hba))
6074                 ufshcd_wb_toggle_buf_flush(hba, false);
6075
6076         ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6077         ufshcd_wb_toggle(hba, false);
6078         hba->caps &= ~UFSHCD_CAP_WB_EN;
6079
6080         dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6081 }
6082
6083 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6084 {
6085         u32 lifetime;
6086         int ret;
6087         u8 index;
6088
6089         index = ufshcd_wb_get_query_index(hba);
6090         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6091                                       QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6092                                       index, 0, &lifetime);
6093         if (ret) {
6094                 dev_err(hba->dev,
6095                         "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6096                         __func__, ret);
6097                 return false;
6098         }
6099
6100         if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6101                 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6102                         __func__, lifetime);
6103                 return false;
6104         }
6105
6106         dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6107                 __func__, lifetime);
6108
6109         return true;
6110 }
6111
6112 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6113 {
6114         int ret;
6115         u32 avail_buf;
6116         u8 index;
6117
6118         if (!ufshcd_is_wb_allowed(hba))
6119                 return false;
6120
6121         if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6122                 ufshcd_wb_force_disable(hba);
6123                 return false;
6124         }
6125
6126         /*
6127          * The UFS device needs VCC to be ON in order to flush.
6128          * With user-space reduction enabled, it's enough to decide whether
6129          * to flush by checking only the available buffer; the threshold
6130          * used here is "more than 90% full".
6131          * With user-space preservation enabled, the current buffer should
6132          * be checked too because the WB buffer size can shrink as the disk
6133          * fills up. This info is provided by the current buffer attribute
6134          * (dCurrentWriteBoosterBufferSize). There's no point in keeping
6135          * VCC on when the current buffer is empty.
6136          */
6137         index = ufshcd_wb_get_query_index(hba);
6138         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6139                                       QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6140                                       index, 0, &avail_buf);
6141         if (ret) {
6142                 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6143                          __func__, ret);
6144                 return false;
6145         }
6146
6147         if (!hba->dev_info.b_presrv_uspc_en)
6148                 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6149
6150         return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
6151 }
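
/*
 * Illustrative summary of the decision above (not part of the original
 * driver):
 *   - WB not allowed                  -> no flush
 *   - WB buffer lifetime exhausted    -> force-disable WB, no flush
 *   - user space not preserved        -> flush once <= 10% of the buffer is left
 *   - user space preserved            -> flush while avail_buf is below
 *                                        wb_flush_threshold and the current
 *                                        buffer is non-empty
 */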
6152
6153 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6154 {
6155         struct ufs_hba *hba = container_of(to_delayed_work(work),
6156                                            struct ufs_hba,
6157                                            rpm_dev_flush_recheck_work);
6158         /*
6159          * To prevent unnecessary VCC power drain after the device finishes a
6160          * WriteBooster buffer flush or Auto BKOPS, force a runtime resume
6161          * after a certain delay so that the threshold is rechecked by the
6162          * next runtime suspend.
6163          */
6164         ufshcd_rpm_get_sync(hba);
6165         ufshcd_rpm_put_sync(hba);
6166 }
6167
6168 /**
6169  * ufshcd_exception_event_handler - handle exceptions raised by device
6170  * @work: pointer to work data
6171  *
6172  * Read bExceptionEventStatus attribute from the device and handle the
6173  * exception event accordingly.
6174  */
6175 static void ufshcd_exception_event_handler(struct work_struct *work)
6176 {
6177         struct ufs_hba *hba;
6178         int err;
6179         u32 status = 0;
6180         hba = container_of(work, struct ufs_hba, eeh_work);
6181
6182         ufshcd_scsi_block_requests(hba);
6183         err = ufshcd_get_ee_status(hba, &status);
6184         if (err) {
6185                 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6186                                 __func__, err);
6187                 goto out;
6188         }
6189
6190         trace_ufshcd_exception_event(dev_name(hba->dev), status);
6191
6192         if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6193                 ufshcd_bkops_exception_event_handler(hba);
6194
6195         if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6196                 ufshcd_temp_exception_event_handler(hba, status);
6197
6198         ufs_debugfs_exception_event(hba, status);
6199 out:
6200         ufshcd_scsi_unblock_requests(hba);
6201 }
6202
6203 /* Complete requests that have door-bell cleared */
6204 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6205 {
6206         if (is_mcq_enabled(hba))
6207                 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6208         else
6209                 ufshcd_transfer_req_compl(hba);
6210
6211         ufshcd_tmc_handler(hba);
6212 }
6213
6214 /**
6215  * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6216  *                              to recover from DL NAC errors
6217  * @hba: per-adapter instance
6218  *
6219  * Return: true if error handling is required, false otherwise.
6220  */
6221 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6222 {
6223         unsigned long flags;
6224         bool err_handling = true;
6225
6226         spin_lock_irqsave(hba->host->host_lock, flags);
6227         /*
6228          * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
6229          * device fatal errors and/or DL NAC & REPLAY timeout errors.
6230          */
6231         if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6232                 goto out;
6233
6234         if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6235             ((hba->saved_err & UIC_ERROR) &&
6236              (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6237                 goto out;
6238
6239         if ((hba->saved_err & UIC_ERROR) &&
6240             (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6241                 int err;
6242                 /*
6243                  * Wait for 50 ms to see whether any other errors are reported.
6244                  */
6245                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6246                 msleep(50);
6247                 spin_lock_irqsave(hba->host->host_lock, flags);
6248
6249                 /*
6250                  * Now check whether we have received any severe errors other
6251                  * than the DL NAC error.
6252                  */
6253                 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6254                     ((hba->saved_err & UIC_ERROR) &&
6255                     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6256                         goto out;
6257
6258                 /*
6259                  * As DL NAC is the only error received so far, send out NOP
6260                  * command to confirm if link is still active or not.
6261                  *   - If we don't get any response then do error recovery.
6262                  *   - If we get response then clear the DL NAC error bit.
6263                  */
6264
6265                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6266                 err = ufshcd_verify_dev_init(hba);
6267                 spin_lock_irqsave(hba->host->host_lock, flags);
6268
6269                 if (err)
6270                         goto out;
6271
6272                 /* Link seems to be alive hence ignore the DL NAC errors */
6273                 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6274                         hba->saved_err &= ~UIC_ERROR;
6275                 /* clear NAC error */
6276                 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6277                 if (!hba->saved_uic_err)
6278                         err_handling = false;
6279         }
6280 out:
6281         spin_unlock_irqrestore(hba->host->host_lock, flags);
6282         return err_handling;
6283 }
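
/*
 * Illustrative flow of the quirk handling above (not part of the original
 * driver): a DL NAC error is latched -> wait 50 ms -> if NAC is still the
 * only UIC error, send a NOP OUT via ufshcd_verify_dev_init(); a NOP IN
 * response means the link is alive, so the NAC bit is cleared instead of
 * escalating to full error recovery.
 */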
6284
6285 /* host lock must be held before calling this func */
6286 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6287 {
6288         return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6289                (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6290 }
6291
6292 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6293 {
6294         lockdep_assert_held(hba->host->host_lock);
6295
6296         /* handle fatal errors only when link is not in error state */
6297         if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6298                 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6299                     ufshcd_is_saved_err_fatal(hba))
6300                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6301                 else
6302                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6303                 queue_work(hba->eh_wq, &hba->eh_work);
6304         }
6305 }
6306
6307 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6308 {
6309         spin_lock_irq(hba->host->host_lock);
6310         hba->force_reset = true;
6311         ufshcd_schedule_eh_work(hba);
6312         spin_unlock_irq(hba->host->host_lock);
6313 }
6314
6315 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6316 {
6317         mutex_lock(&hba->wb_mutex);
6318         down_write(&hba->clk_scaling_lock);
6319         hba->clk_scaling.is_allowed = allow;
6320         up_write(&hba->clk_scaling_lock);
6321         mutex_unlock(&hba->wb_mutex);
6322 }
6323
6324 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6325 {
6326         if (suspend) {
6327                 if (hba->clk_scaling.is_enabled)
6328                         ufshcd_suspend_clkscaling(hba);
6329                 ufshcd_clk_scaling_allow(hba, false);
6330         } else {
6331                 ufshcd_clk_scaling_allow(hba, true);
6332                 if (hba->clk_scaling.is_enabled)
6333                         ufshcd_resume_clkscaling(hba);
6334         }
6335 }
6336
6337 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6338 {
6339         ufshcd_rpm_get_sync(hba);
6340         if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6341             hba->is_sys_suspended) {
6342                 enum ufs_pm_op pm_op;
6343
6344                 /*
6345                  * Don't assume anything about the resume path: if
6346                  * resume fails, IRQ and clocks can be OFF, and the power
6347                  * supplies can be OFF or in LPM.
6348                  */
6349                 ufshcd_setup_hba_vreg(hba, true);
6350                 ufshcd_enable_irq(hba);
6351                 ufshcd_setup_vreg(hba, true);
6352                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6353                 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6354                 ufshcd_hold(hba);
6355                 if (!ufshcd_is_clkgating_allowed(hba))
6356                         ufshcd_setup_clocks(hba, true);
6357                 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6358                 ufshcd_vops_resume(hba, pm_op);
6359         } else {
6360                 ufshcd_hold(hba);
6361                 if (ufshcd_is_clkscaling_supported(hba) &&
6362                     hba->clk_scaling.is_enabled)
6363                         ufshcd_suspend_clkscaling(hba);
6364                 ufshcd_clk_scaling_allow(hba, false);
6365         }
6366         ufshcd_scsi_block_requests(hba);
6367         /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6368         blk_mq_wait_quiesce_done(&hba->host->tag_set);
6369         cancel_work_sync(&hba->eeh_work);
6370 }
6371
6372 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6373 {
6374         ufshcd_scsi_unblock_requests(hba);
6375         ufshcd_release(hba);
6376         if (ufshcd_is_clkscaling_supported(hba))
6377                 ufshcd_clk_scaling_suspend(hba, false);
6378         ufshcd_rpm_put(hba);
6379 }
6380
6381 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6382 {
6383         return (!hba->is_powered || hba->shutting_down ||
6384                 !hba->ufs_device_wlun ||
6385                 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6386                 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6387                    ufshcd_is_link_broken(hba))));
6388 }
6389
6390 #ifdef CONFIG_PM
6391 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6392 {
6393         struct Scsi_Host *shost = hba->host;
6394         struct scsi_device *sdev;
6395         struct request_queue *q;
6396         int ret;
6397
6398         hba->is_sys_suspended = false;
6399         /*
6400          * Set the RPM status of the WLUN device to RPM_ACTIVE;
6401          * this also clears its runtime error.
6402          */
6403         ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6404
6405         /* hba device might have a runtime error otherwise */
6406         if (ret)
6407                 ret = pm_runtime_set_active(hba->dev);
6408         /*
6409          * If the WLUN device had a runtime error, we also need to resume its
6410          * consumer SCSI devices in case any of them failed to resume due to
6411          * the supplier's runtime resume failure. This is done to unblock
6412          * blk_queue_enter() in case there are bios waiting inside it.
6413          */
6414         if (!ret) {
6415                 shost_for_each_device(sdev, shost) {
6416                         q = sdev->request_queue;
6417                         if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6418                                        q->rpm_status == RPM_SUSPENDING))
6419                                 pm_request_resume(q->dev);
6420                 }
6421         }
6422 }
6423 #else
6424 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6425 {
6426 }
6427 #endif
6428
6429 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6430 {
6431         struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6432         u32 mode;
6433
6434         ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6435
6436         if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6437                 return true;
6438
6439         if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6440                 return true;
6441
6442         return false;
6443 }
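
/*
 * Illustrative note (assumption, not part of the original driver):
 * PA_PWRMODE packs the TX power mode in the lower nibble and the RX power
 * mode in the upper nibble, so e.g. a readback of 0x11 would indicate FAST
 * mode in both directions; any mismatch with hba->pwr_info in either
 * direction makes this helper request a power mode restore.
 */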
6444
6445 static bool ufshcd_abort_one(struct request *rq, void *priv)
6446 {
6447         int *ret = priv;
6448         u32 tag = rq->tag;
6449         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6450         struct scsi_device *sdev = cmd->device;
6451         struct Scsi_Host *shost = sdev->host;
6452         struct ufs_hba *hba = shost_priv(shost);
6453         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
6454         struct ufs_hw_queue *hwq;
6455         unsigned long flags;
6456
6457         *ret = ufshcd_try_to_abort_task(hba, tag);
6458         dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6459                 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6460                 *ret ? "failed" : "succeeded");
6461
6462         /* Release cmd in MCQ mode if abort succeeds */
6463         if (is_mcq_enabled(hba) && (*ret == 0)) {
6464                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
6465                 spin_lock_irqsave(&hwq->cq_lock, flags);
6466                 if (ufshcd_cmd_inflight(lrbp->cmd))
6467                         ufshcd_release_scsi_cmd(hba, lrbp);
6468                 spin_unlock_irqrestore(&hwq->cq_lock, flags);
6469         }
6470
6471         return *ret == 0;
6472 }
6473
6474 /**
6475  * ufshcd_abort_all - Abort all pending commands.
6476  * @hba: Host bus adapter pointer.
6477  *
6478  * Return: true if and only if the host controller needs to be reset.
6479  */
6480 static bool ufshcd_abort_all(struct ufs_hba *hba)
6481 {
6482         int tag, ret = 0;
6483
6484         blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6485         if (ret)
6486                 goto out;
6487
6488         /* Clear pending task management requests */
6489         for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6490                 ret = ufshcd_clear_tm_cmd(hba, tag);
6491                 if (ret)
6492                         goto out;
6493         }
6494
6495 out:
6496         /* Complete the requests that are cleared by s/w */
6497         ufshcd_complete_requests(hba, false);
6498
6499         return ret != 0;
6500 }
6501
6502 /**
6503  * ufshcd_err_handler - handle UFS errors that require s/w attention
6504  * @work: pointer to work structure
6505  */
6506 static void ufshcd_err_handler(struct work_struct *work)
6507 {
6508         int retries = MAX_ERR_HANDLER_RETRIES;
6509         struct ufs_hba *hba;
6510         unsigned long flags;
6511         bool needs_restore;
6512         bool needs_reset;
6513         int pmc_err;
6514
6515         hba = container_of(work, struct ufs_hba, eh_work);
6516
6517         dev_info(hba->dev,
6518                  "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6519                  __func__, ufshcd_state_name[hba->ufshcd_state],
6520                  hba->is_powered, hba->shutting_down, hba->saved_err,
6521                  hba->saved_uic_err, hba->force_reset,
6522                  ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6523
6524         down(&hba->host_sem);
6525         spin_lock_irqsave(hba->host->host_lock, flags);
6526         if (ufshcd_err_handling_should_stop(hba)) {
6527                 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6528                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6529                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6530                 up(&hba->host_sem);
6531                 return;
6532         }
6533         ufshcd_set_eh_in_progress(hba);
6534         spin_unlock_irqrestore(hba->host->host_lock, flags);
6535         ufshcd_err_handling_prepare(hba);
6536         /* Complete requests that have door-bell cleared by h/w */
6537         ufshcd_complete_requests(hba, false);
6538         spin_lock_irqsave(hba->host->host_lock, flags);
6539 again:
6540         needs_restore = false;
6541         needs_reset = false;
6542
6543         if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6544                 hba->ufshcd_state = UFSHCD_STATE_RESET;
6545         /*
6546          * A full reset and restore might have happened after the preparation
6547          * finished, so double-check whether we should stop.
6548          */
6549         if (ufshcd_err_handling_should_stop(hba))
6550                 goto skip_err_handling;
6551
6552         if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6553                 bool ret;
6554
6555                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6556                 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6557                 ret = ufshcd_quirk_dl_nac_errors(hba);
6558                 spin_lock_irqsave(hba->host->host_lock, flags);
6559                 if (!ret && ufshcd_err_handling_should_stop(hba))
6560                         goto skip_err_handling;
6561         }
6562
6563         if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6564             (hba->saved_uic_err &&
6565              (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6566                 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6567
6568                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6569                 ufshcd_print_host_state(hba);
6570                 ufshcd_print_pwr_info(hba);
6571                 ufshcd_print_evt_hist(hba);
6572                 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6573                 ufshcd_print_trs_all(hba, pr_prdt);
6574                 spin_lock_irqsave(hba->host->host_lock, flags);
6575         }
6576
6577         /*
6578          * If a host reset is required then skip forcefully clearing the
6579          * pending transfers because they will be cleared during the host
6580          * reset and restore.
6581          */
6582         if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6583             ufshcd_is_saved_err_fatal(hba) ||
6584             ((hba->saved_err & UIC_ERROR) &&
6585              (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6586                                     UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6587                 needs_reset = true;
6588                 goto do_reset;
6589         }
6590
6591         /*
6592          * If LINERESET was caught, UFS might have been put to PWM mode,
6593          * check if power mode restore is needed.
6594          */
6595         if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6596                 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6597                 if (!hba->saved_uic_err)
6598                         hba->saved_err &= ~UIC_ERROR;
6599                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6600                 if (ufshcd_is_pwr_mode_restore_needed(hba))
6601                         needs_restore = true;
6602                 spin_lock_irqsave(hba->host->host_lock, flags);
6603                 if (!hba->saved_err && !needs_restore)
6604                         goto skip_err_handling;
6605         }
6606
6607         hba->silence_err_logs = true;
6608         /* release lock as clear command might sleep */
6609         spin_unlock_irqrestore(hba->host->host_lock, flags);
6610
6611         needs_reset = ufshcd_abort_all(hba);
6612
6613         spin_lock_irqsave(hba->host->host_lock, flags);
6614         hba->silence_err_logs = false;
6615         if (needs_reset)
6616                 goto do_reset;
6617
6618         /*
6619          * After all reqs and tasks are cleared from the doorbell,
6620          * it is now safe to restore the power mode.
6621          */
6622         if (needs_restore) {
6623                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6624                 /*
6625                  * Hold the scaling lock just in case dev cmds
6626                  * are sent via bsg and/or sysfs.
6627                  */
6628                 down_write(&hba->clk_scaling_lock);
6629                 hba->force_pmc = true;
6630                 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6631                 if (pmc_err) {
6632                         needs_reset = true;
6633                         dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6634                                         __func__, pmc_err);
6635                 }
6636                 hba->force_pmc = false;
6637                 ufshcd_print_pwr_info(hba);
6638                 up_write(&hba->clk_scaling_lock);
6639                 spin_lock_irqsave(hba->host->host_lock, flags);
6640         }
6641
6642 do_reset:
6643         /* Fatal errors need reset */
6644         if (needs_reset) {
6645                 int err;
6646
6647                 hba->force_reset = false;
6648                 spin_unlock_irqrestore(hba->host->host_lock, flags);
6649                 err = ufshcd_reset_and_restore(hba);
6650                 if (err)
6651                         dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6652                                         __func__, err);
6653                 else
6654                         ufshcd_recover_pm_error(hba);
6655                 spin_lock_irqsave(hba->host->host_lock, flags);
6656         }
6657
6658 skip_err_handling:
6659         if (!needs_reset) {
6660                 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6661                         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6662                 if (hba->saved_err || hba->saved_uic_err)
6663                         dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6664                             __func__, hba->saved_err, hba->saved_uic_err);
6665         }
6666         /* Exit in an operational state or dead */
6667         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6668             hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6669                 if (--retries)
6670                         goto again;
6671                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6672         }
6673         ufshcd_clear_eh_in_progress(hba);
6674         spin_unlock_irqrestore(hba->host->host_lock, flags);
6675         ufshcd_err_handling_unprepare(hba);
6676         up(&hba->host_sem);
6677
6678         dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6679                  ufshcd_state_name[hba->ufshcd_state]);
6680 }
6681
6682 /**
6683  * ufshcd_update_uic_error - check and set fatal UIC error flags.
6684  * @hba: per-adapter instance
6685  *
6686  * Return:
6687  *  IRQ_HANDLED - If interrupt is valid
6688  *  IRQ_NONE    - If invalid interrupt
6689  */
6690 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6691 {
6692         u32 reg;
6693         irqreturn_t retval = IRQ_NONE;
6694
6695         /* PHY layer error */
6696         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6697         if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6698             (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6699                 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6700                 /*
6701                  * To know whether this error is fatal or not, the DB timeout
6702                  * must be checked, but that error is handled separately.
6703                  */
6704                 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6705                         dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6706                                         __func__);
6707
6708                 /* Got a LINERESET indication. */
6709                 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6710                         struct uic_command *cmd = NULL;
6711
6712                         hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6713                         if (hba->uic_async_done && hba->active_uic_cmd)
6714                                 cmd = hba->active_uic_cmd;
6715                         /*
6716                          * Ignore the LINERESET during power mode change
6717                          * operation via DME_SET command.
6718                          */
6719                         if (cmd && (cmd->command == UIC_CMD_DME_SET))
6720                                 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6721                 }
6722                 retval |= IRQ_HANDLED;
6723         }
6724
6725         /* PA_INIT_ERROR is fatal and needs UIC reset */
6726         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6727         if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6728             (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6729                 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6730
6731                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6732                         hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6733                 else if (hba->dev_quirks &
6734                                 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6735                         if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6736                                 hba->uic_error |=
6737                                         UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6738                         else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6739                                 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6740                 }
6741                 retval |= IRQ_HANDLED;
6742         }
6743
6744         /* UIC NL/TL/DME errors needs software retry */
6745         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6746         if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6747             (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6748                 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6749                 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6750                 retval |= IRQ_HANDLED;
6751         }
6752
6753         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6754         if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6755             (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6756                 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6757                 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6758                 retval |= IRQ_HANDLED;
6759         }
6760
6761         reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6762         if ((reg & UIC_DME_ERROR) &&
6763             (reg & UIC_DME_ERROR_CODE_MASK)) {
6764                 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6765                 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6766                 retval |= IRQ_HANDLED;
6767         }
6768
6769         dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6770                         __func__, hba->uic_error);
6771         return retval;
6772 }
6773
6774 /**
6775  * ufshcd_check_errors - Check for errors that need s/w attention
6776  * @hba: per-adapter instance
6777  * @intr_status: interrupt status generated by the controller
6778  *
6779  * Return:
6780  *  IRQ_HANDLED - If interrupt is valid
6781  *  IRQ_NONE    - If invalid interrupt
6782  */
6783 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6784 {
6785         bool queue_eh_work = false;
6786         irqreturn_t retval = IRQ_NONE;
6787
6788         spin_lock(hba->host->host_lock);
6789         hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6790
6791         if (hba->errors & INT_FATAL_ERRORS) {
6792                 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6793                                        hba->errors);
6794                 queue_eh_work = true;
6795         }
6796
6797         if (hba->errors & UIC_ERROR) {
6798                 hba->uic_error = 0;
6799                 retval = ufshcd_update_uic_error(hba);
6800                 if (hba->uic_error)
6801                         queue_eh_work = true;
6802         }
6803
6804         if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6805                 dev_err(hba->dev,
6806                         "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6807                         __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6808                         "Enter" : "Exit",
6809                         hba->errors, ufshcd_get_upmcrs(hba));
6810                 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6811                                        hba->errors);
6812                 ufshcd_set_link_broken(hba);
6813                 queue_eh_work = true;
6814         }
6815
6816         if (queue_eh_work) {
6817                 /*
6818                  * Update the transfer error masks to sticky bits; do this
6819                  * irrespective of the current ufshcd_state.
6820                  */
6821                 hba->saved_err |= hba->errors;
6822                 hba->saved_uic_err |= hba->uic_error;
6823
6824                 /* dump controller state before resetting */
6825                 if ((hba->saved_err &
6826                      (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6827                     (hba->saved_uic_err &&
6828                      (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6829                         dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6830                                         __func__, hba->saved_err,
6831                                         hba->saved_uic_err);
6832                         ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6833                                          "host_regs: ");
6834                         ufshcd_print_pwr_info(hba);
6835                 }
6836                 ufshcd_schedule_eh_work(hba);
6837                 retval |= IRQ_HANDLED;
6838         }
6839         /*
6840          * If !queue_eh_work:
6841          * the remaining errors are either non-fatal ones from which the
6842          * host recovers by itself without s/w intervention, or errors
6843          * that will be handled by the SCSI core layer.
6844          */
6845         hba->errors = 0;
6846         hba->uic_error = 0;
6847         spin_unlock(hba->host->host_lock);
6848         return retval;
6849 }
6850
6851 /**
6852  * ufshcd_tmc_handler - handle task management function completion
6853  * @hba: per adapter instance
6854  *
6855  * Return:
6856  *  IRQ_HANDLED - If interrupt is valid
6857  *  IRQ_NONE    - If invalid interrupt
6858  */
6859 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6860 {
6861         unsigned long flags, pending, issued;
6862         irqreturn_t ret = IRQ_NONE;
6863         int tag;
6864
6865         spin_lock_irqsave(hba->host->host_lock, flags);
6866         pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6867         issued = hba->outstanding_tasks & ~pending;
6868         for_each_set_bit(tag, &issued, hba->nutmrs) {
6869                 struct request *req = hba->tmf_rqs[tag];
6870                 struct completion *c = req->end_io_data;
6871
6872                 complete(c);
6873                 ret = IRQ_HANDLED;
6874         }
6875         spin_unlock_irqrestore(hba->host->host_lock, flags);
6876
6877         return ret;
6878 }
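
/*
 * Illustrative example (not part of the original driver): with
 * outstanding_tasks == 0b0110 and a task doorbell readback of 0b0100,
 * "issued" above evaluates to 0b0010, i.e. only the TMF in slot 1 has been
 * completed by the controller, so only that slot's completion is signalled.
 */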
6879
6880 /**
6881  * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6882  * @hba: per adapter instance
6883  *
6884  * Return: IRQ_HANDLED if interrupt is handled.
6885  */
6886 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6887 {
6888         struct ufs_hw_queue *hwq;
6889         unsigned long outstanding_cqs;
6890         unsigned int nr_queues;
6891         int i, ret;
6892         u32 events;
6893
6894         ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6895         if (ret)
6896                 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6897
6898         /* Exclude the poll queues */
6899         nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6900         for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6901                 hwq = &hba->uhq[i];
6902
6903                 events = ufshcd_mcq_read_cqis(hba, i);
6904                 if (events)
6905                         ufshcd_mcq_write_cqis(hba, events, i);
6906
6907                 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6908                         ufshcd_mcq_poll_cqe_lock(hba, hwq);
6909         }
6910
6911         return IRQ_HANDLED;
6912 }
6913
6914 /**
6915  * ufshcd_sl_intr - Interrupt service routine
6916  * @hba: per adapter instance
6917  * @intr_status: contains interrupts generated by the controller
6918  *
6919  * Return:
6920  *  IRQ_HANDLED - If interrupt is valid
6921  *  IRQ_NONE    - If invalid interrupt
6922  */
6923 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6924 {
6925         irqreturn_t retval = IRQ_NONE;
6926
6927         if (intr_status & UFSHCD_UIC_MASK)
6928                 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6929
6930         if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6931                 retval |= ufshcd_check_errors(hba, intr_status);
6932
6933         if (intr_status & UTP_TASK_REQ_COMPL)
6934                 retval |= ufshcd_tmc_handler(hba);
6935
6936         if (intr_status & UTP_TRANSFER_REQ_COMPL)
6937                 retval |= ufshcd_transfer_req_compl(hba);
6938
6939         if (intr_status & MCQ_CQ_EVENT_STATUS)
6940                 retval |= ufshcd_handle_mcq_cq_events(hba);
6941
6942         return retval;
6943 }
6944
6945 /**
6946  * ufshcd_intr - Main interrupt service routine
6947  * @irq: irq number
6948  * @__hba: pointer to adapter instance
6949  *
6950  * Return:
6951  *  IRQ_HANDLED - If interrupt is valid
6952  *  IRQ_NONE    - If invalid interrupt
6953  */
6954 static irqreturn_t ufshcd_intr(int irq, void *__hba)
6955 {
6956         u32 intr_status, enabled_intr_status = 0;
6957         irqreturn_t retval = IRQ_NONE;
6958         struct ufs_hba *hba = __hba;
6959         int retries = hba->nutrs;
6960
6961         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6962         hba->ufs_stats.last_intr_status = intr_status;
6963         hba->ufs_stats.last_intr_ts = local_clock();
6964
6965         /*
6966          * There can be at most hba->nutrs reqs in flight, and in the worst
6967          * case the reqs finish one by one after the interrupt status has been
6968          * read. Make sure we handle them by re-checking the interrupt status
6969          * in a loop until all of the reqs are processed before returning.
6970          */
6971         while (intr_status && retries--) {
6972                 enabled_intr_status =
6973                         intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
6974                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
6975                 if (enabled_intr_status)
6976                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
6977
6978                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6979         }
6980
6981         if (enabled_intr_status && retval == IRQ_NONE &&
6982             (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
6983              hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
6984                 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6985                                         __func__,
6986                                         intr_status,
6987                                         hba->ufs_stats.last_intr_status,
6988                                         enabled_intr_status);
6989                 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6990         }
6991
6992         return retval;
6993 }
6994
6995 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
6996 {
6997         int err = 0;
6998         u32 mask = 1 << tag;
6999         unsigned long flags;
7000
7001         if (!test_bit(tag, &hba->outstanding_tasks))
7002                 goto out;
7003
7004         spin_lock_irqsave(hba->host->host_lock, flags);
7005         ufshcd_utmrl_clear(hba, tag);
7006         spin_unlock_irqrestore(hba->host->host_lock, flags);
7007
7008         /* Poll for at most 1 s for the doorbell register to be cleared by h/w */
7009         err = ufshcd_wait_for_register(hba,
7010                         REG_UTP_TASK_REQ_DOOR_BELL,
7011                         mask, 0, 1000, 1000);
7012
7013         dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
7014                 tag, err < 0 ? "failed" : "succeeded");
7015
7016 out:
7017         return err;
7018 }
7019
7020 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
7021                 struct utp_task_req_desc *treq, u8 tm_function)
7022 {
7023         struct request_queue *q = hba->tmf_queue;
7024         struct Scsi_Host *host = hba->host;
7025         DECLARE_COMPLETION_ONSTACK(wait);
7026         struct request *req;
7027         unsigned long flags;
7028         int task_tag, err;
7029
7030         /*
7031          * blk_mq_alloc_request() is used here only to get a free tag.
7032          */
7033         req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
7034         if (IS_ERR(req))
7035                 return PTR_ERR(req);
7036
7037         req->end_io_data = &wait;
7038         ufshcd_hold(hba);
7039
7040         spin_lock_irqsave(host->host_lock, flags);
7041
7042         task_tag = req->tag;
7043         hba->tmf_rqs[req->tag] = req;
7044         treq->upiu_req.req_header.task_tag = task_tag;
7045
7046         memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
7047         ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7048
7049         /* send command to the controller */
7050         __set_bit(task_tag, &hba->outstanding_tasks);
7051
7052         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7053         /* Make sure that doorbell is committed immediately */
7054         wmb();
7055
7056         spin_unlock_irqrestore(host->host_lock, flags);
7057
7058         ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7059
7060         /* wait until the task management command is completed */
7061         err = wait_for_completion_io_timeout(&wait,
7062                         msecs_to_jiffies(TM_CMD_TIMEOUT));
7063         if (!err) {
7064                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7065                 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7066                                 __func__, tm_function);
7067                 if (ufshcd_clear_tm_cmd(hba, task_tag))
7068                         dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7069                                         __func__, task_tag);
7070                 err = -ETIMEDOUT;
7071         } else {
7072                 err = 0;
7073                 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7074
7075                 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7076         }
7077
7078         spin_lock_irqsave(hba->host->host_lock, flags);
7079         hba->tmf_rqs[req->tag] = NULL;
7080         __clear_bit(task_tag, &hba->outstanding_tasks);
7081         spin_unlock_irqrestore(hba->host->host_lock, flags);
7082
7083         ufshcd_release(hba);
7084         blk_mq_free_request(req);
7085
7086         return err;
7087 }
7088
7089 /**
7090  * ufshcd_issue_tm_cmd - issues task management commands to controller
7091  * @hba: per adapter instance
7092  * @lun_id: LUN ID to which TM command is sent
7093  * @task_id: task ID to which the TM command is applicable
7094  * @tm_function: task management function opcode
7095  * @tm_response: task management service response return value
7096  *
7097  * Return: non-zero value on error, zero on success.
7098  */
7099 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7100                 u8 tm_function, u8 *tm_response)
7101 {
7102         struct utp_task_req_desc treq = { };
7103         enum utp_ocs ocs_value;
7104         int err;
7105
7106         /* Configure task request descriptor */
7107         treq.header.interrupt = 1;
7108         treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7109
7110         /* Configure task request UPIU */
7111         treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7112         treq.upiu_req.req_header.lun = lun_id;
7113         treq.upiu_req.req_header.tm_function = tm_function;
7114
7115         /*
7116          * The host shall provide the same value for the LUN field in the
7117          * basic header and for Input Parameter 1.
7118          */
7119         treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7120         treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7121
7122         err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7123         if (err == -ETIMEDOUT)
7124                 return err;
7125
7126         ocs_value = treq.header.ocs & MASK_OCS;
7127         if (ocs_value != OCS_SUCCESS)
7128                 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7129                                 __func__, ocs_value);
7130         else if (tm_response)
7131                 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7132                                 MASK_TM_SERVICE_RESP;
7133         return err;
7134 }
7135
7136 /**
7137  * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7138  * @hba:        per-adapter instance
7139  * @req_upiu:   upiu request
7140  * @rsp_upiu:   upiu reply
7141  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7142  * @buff_len:   descriptor size, 0 if NA
7143  * @cmd_type:   specifies the type (NOP, Query...)
7144  * @desc_op:    descriptor operation
7145  *
7146  * These types of requests use the UTP Transfer Request Descriptor (UTRD).
7147  * Therefore they "ride" the device management infrastructure: they reuse its
7148  * tag and its tasks work queues.
7149  *
7150  * Since there is only one available tag for device management commands,
7151  * the caller is expected to hold the hba->dev_cmd.lock mutex.
7152  *
7153  * Return: 0 upon success; < 0 upon failure.
7154  */
7155 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7156                                         struct utp_upiu_req *req_upiu,
7157                                         struct utp_upiu_req *rsp_upiu,
7158                                         u8 *desc_buff, int *buff_len,
7159                                         enum dev_cmd_type cmd_type,
7160                                         enum query_opcode desc_op)
7161 {
7162         DECLARE_COMPLETION_ONSTACK(wait);
7163         const u32 tag = hba->reserved_slot;
7164         struct ufshcd_lrb *lrbp;
7165         int err = 0;
7166         u8 upiu_flags;
7167
7168         /* Protects use of hba->reserved_slot. */
7169         lockdep_assert_held(&hba->dev_cmd.lock);
7170
7171         down_read(&hba->clk_scaling_lock);
7172
7173         lrbp = &hba->lrb[tag];
7174         lrbp->cmd = NULL;
7175         lrbp->task_tag = tag;
7176         lrbp->lun = 0;
7177         lrbp->intr_cmd = true;
7178         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7179         hba->dev_cmd.type = cmd_type;
7180
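        /*
         * UFSHCI 1.x controllers use a dedicated device management command
         * type; later controllers use the generic UFS storage command type.
         */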
7181         if (hba->ufs_version <= ufshci_version(1, 1))
7182                 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
7183         else
7184                 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7185
7186         /* update the task tag in the request upiu */
7187         req_upiu->header.task_tag = tag;
7188
7189         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
7190
7191         /* just copy the upiu request as it is */
7192         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7193         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7194                 /* The Data Segment Area is optional depending upon the query
7195                  * function value. For WRITE DESCRIPTOR, the data segment
7196                  * follows right after the Transaction Specific Fields (TSF).
7197                  */
7198                 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7199                 *buff_len = 0;
7200         }
7201
7202         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7203
7204         hba->dev_cmd.complete = &wait;
7205
7206         ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
7207
7208         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7209         /*
7210          * Ignore the return value here - ufshcd_check_query_response() is
7211          * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7212          * Read the response directly, ignoring all errors.
7213          */
7214         ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
7215
7216         /* just copy the upiu response as it is */
7217         memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7218         if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7219                 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7220                 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7221                                            .data_segment_length);
7222
7223                 if (*buff_len >= resp_len) {
7224                         memcpy(desc_buff, descp, resp_len);
7225                         *buff_len = resp_len;
7226                 } else {
7227                         dev_warn(hba->dev,
7228                                  "%s: rsp size %d is bigger than buffer size %d",
7229                                  __func__, resp_len, *buff_len);
7230                         *buff_len = 0;
7231                         err = -EINVAL;
7232                 }
7233         }
7234         ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
7235                                     (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
7236
7237         up_read(&hba->clk_scaling_lock);
7238         return err;
7239 }
7240
7241 /**
7242  * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7243  * @hba:        per-adapter instance
7244  * @req_upiu:   upiu request
7245  * @rsp_upiu:   upiu reply - only 8 DW as we do not support scsi commands
7246  * @msgcode:    message code, one of UPIU Transaction Codes Initiator to Target
7247  * @desc_buff:  pointer to descriptor buffer, NULL if NA
7248  * @buff_len:   descriptor size, 0 if NA
7249  * @desc_op:    descriptor operation
7250  *
7251  * Supports UTP Transfer requests (nop and query), and UTP Task
7252  * Management requests.
7253  * It is up to the caller to fill the UPIU content properly, as it will
7254  * be copied without any further input validation.
7255  *
7256  * Return: 0 upon success; < 0 upon failure.
7257  */
7258 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7259                              struct utp_upiu_req *req_upiu,
7260                              struct utp_upiu_req *rsp_upiu,
7261                              enum upiu_request_transaction msgcode,
7262                              u8 *desc_buff, int *buff_len,
7263                              enum query_opcode desc_op)
7264 {
7265         int err;
7266         enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7267         struct utp_task_req_desc treq = { };
7268         enum utp_ocs ocs_value;
7269         u8 tm_f = req_upiu->header.tm_function;
7270
7271         switch (msgcode) {
7272         case UPIU_TRANSACTION_NOP_OUT:
7273                 cmd_type = DEV_CMD_TYPE_NOP;
7274                 fallthrough;
7275         case UPIU_TRANSACTION_QUERY_REQ:
7276                 ufshcd_hold(hba);
7277                 mutex_lock(&hba->dev_cmd.lock);
7278                 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7279                                                    desc_buff, buff_len,
7280                                                    cmd_type, desc_op);
7281                 mutex_unlock(&hba->dev_cmd.lock);
7282                 ufshcd_release(hba);
7283
7284                 break;
7285         case UPIU_TRANSACTION_TASK_REQ:
7286                 treq.header.interrupt = 1;
7287                 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7288
7289                 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7290
7291                 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7292                 if (err == -ETIMEDOUT)
7293                         break;
7294
7295                 ocs_value = treq.header.ocs & MASK_OCS;
7296                 if (ocs_value != OCS_SUCCESS) {
7297                         dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7298                                 ocs_value);
7299                         break;
7300                 }
7301
7302                 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7303
7304                 break;
7305         default:
7306                 err = -EINVAL;
7307
7308                 break;
7309         }
7310
7311         return err;
7312 }
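
/*
 * Illustrative sketch only (not taken from an in-tree caller): issuing a
 * NOP OUT UPIU through ufshcd_exec_raw_upiu_cmd() and checking that a NOP IN
 * comes back. Error handling is reduced to the bare minimum and the descriptor
 * buffer arguments are unused for this transaction type.
 *
 *	struct utp_upiu_req req = {}, rsp = {};
 *	int ret;
 *
 *	req.header.transaction_code = UPIU_TRANSACTION_NOP_OUT;
 *	ret = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, NULL, UPIU_QUERY_OPCODE_NOP);
 *	if (!ret && rsp.header.transaction_code != UPIU_TRANSACTION_NOP_IN)
 *		ret = -EINVAL;
 */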
7313
7314 /**
7315  * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7316  * @hba:        per adapter instance
7317  * @req_upiu:   upiu request
7318  * @rsp_upiu:   upiu reply
7319  * @req_ehs:    EHS field which contains Advanced RPMB Request Message
7320  * @rsp_ehs:    EHS field which returns Advanced RPMB Response Message
7321  * @sg_cnt:     The number of sg lists actually used
7322  * @sg_list:    Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7323  * @dir:        DMA direction
7324  *
7325  * Return: zero on success, non-zero on failure.
7326  */
7327 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7328                          struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7329                          struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7330                          enum dma_data_direction dir)
7331 {
7332         DECLARE_COMPLETION_ONSTACK(wait);
7333         const u32 tag = hba->reserved_slot;
7334         struct ufshcd_lrb *lrbp;
7335         int err = 0;
7336         int result;
7337         u8 upiu_flags;
7338         u8 *ehs_data;
7339         u16 ehs_len;
7340
7341         /* Protects use of hba->reserved_slot. */
7342         ufshcd_hold(hba);
7343         mutex_lock(&hba->dev_cmd.lock);
7344         down_read(&hba->clk_scaling_lock);
7345
7346         lrbp = &hba->lrb[tag];
7347         lrbp->cmd = NULL;
7348         lrbp->task_tag = tag;
7349         lrbp->lun = UFS_UPIU_RPMB_WLUN;
7350
7351         lrbp->intr_cmd = true;
7352         ufshcd_prepare_lrbp_crypto(NULL, lrbp);
7353         hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
7354
7355         /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
7356         lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
7357
7358         /*
7359          * According to the UFSHCI 4.0 specification (page 24), if EHSLUTRDS is 0, the host
7360          * controller takes the EHS length from the CMD UPIU and the driver uses the EHS Length
7361          * field in the CMD UPIU. If it is 1, the host controller takes the EHS length from the UTRD.
7362          */
7363         if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
7364                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
7365         else
7366                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
7367
7368         /* update the task tag */
7369         req_upiu->header.task_tag = tag;
7370
7371         /* copy the UPIU (which contains the CDB) request as it is */
7372         memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7373         /* Copy EHS, starting with byte32, immediately after the CDB package */
7374         memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7375
7376         if (dir != DMA_NONE && sg_list)
7377                 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7378
7379         memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7380
7381         hba->dev_cmd.complete = &wait;
7382
7383         ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
7384
7385         err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
7386
7387         if (!err) {
7388                 /* Just copy the upiu response as it is */
7389                 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7390                 /* Get the response UPIU result */
7391                 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7392                         lrbp->ucd_rsp_ptr->header.status;
7393
7394                 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7395                 /*
7396                  * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7397                  * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7398                  * Message is 02h
7399                  */
7400                 if (ehs_len == 2 && rsp_ehs) {
7401                         /*
7402                          * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7403                          * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7404                          */
7405                         ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7406                         memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7407                 }
7408         }
7409
7410         up_read(&hba->clk_scaling_lock);
7411         mutex_unlock(&hba->dev_cmd.lock);
7412         ufshcd_release(hba);
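        /* On success return the combined response/status word, otherwise the error */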
7413         return err ? : result;
7414 }
7415
7416 /**
7417  * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7418  * @cmd: SCSI command pointer
7419  *
7420  * Return: SUCCESS or FAILED.
7421  */
7422 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7423 {
7424         unsigned long flags, pending_reqs = 0, not_cleared = 0;
7425         struct Scsi_Host *host;
7426         struct ufs_hba *hba;
7427         struct ufs_hw_queue *hwq;
7428         struct ufshcd_lrb *lrbp;
7429         u32 pos, not_cleared_mask = 0;
7430         int err;
7431         u8 resp = 0xF, lun;
7432
7433         host = cmd->device->host;
7434         hba = shost_priv(host);
7435
7436         lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7437         err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7438         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7439                 if (!err)
7440                         err = resp;
7441                 goto out;
7442         }
7443
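                /*
                 * In MCQ mode there is no doorbell to scan: walk the lrb array,
                 * clear every command still in flight for this LUN and poll the
                 * corresponding completion queue.
                 */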
7444         if (is_mcq_enabled(hba)) {
7445                 for (pos = 0; pos < hba->nutrs; pos++) {
7446                         lrbp = &hba->lrb[pos];
7447                         if (ufshcd_cmd_inflight(lrbp->cmd) &&
7448                             lrbp->lun == lun) {
7449                                 ufshcd_clear_cmd(hba, pos);
7450                                 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7451                                 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7452                         }
7453                 }
7454                 err = 0;
7455                 goto out;
7456         }
7457
7458         /* clear the commands that were pending for corresponding LUN */
7459         spin_lock_irqsave(&hba->outstanding_lock, flags);
7460         for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7461                 if (hba->lrb[pos].lun == lun)
7462                         __set_bit(pos, &pending_reqs);
7463         hba->outstanding_reqs &= ~pending_reqs;
7464         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7465
7466         for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7467                 if (ufshcd_clear_cmd(hba, pos) < 0) {
7468                         spin_lock_irqsave(&hba->outstanding_lock, flags);
7469                         not_cleared = 1U << pos &
7470                                 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7471                         hba->outstanding_reqs |= not_cleared;
7472                         not_cleared_mask |= not_cleared;
7473                         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7474
7475                         dev_err(hba->dev, "%s: failed to clear request %d\n",
7476                                 __func__, pos);
7477                 }
7478         }
7479         __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7480
7481 out:
7482         hba->req_abort_count = 0;
7483         ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7484         if (!err) {
7485                 err = SUCCESS;
7486         } else {
7487                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7488                 err = FAILED;
7489         }
7490         return err;
7491 }
7492
7493 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7494 {
7495         struct ufshcd_lrb *lrbp;
7496         int tag;
7497
7498         for_each_set_bit(tag, &bitmap, hba->nutrs) {
7499                 lrbp = &hba->lrb[tag];
7500                 lrbp->req_abort_skip = true;
7501         }
7502 }
7503
7504 /**
7505  * ufshcd_try_to_abort_task - abort a specific task
7506  * @hba: Pointer to adapter instance
7507  * @tag: Task tag/index to be aborted
7508  *
7509  * Abort the pending command in the device by sending a UFS_ABORT_TASK task
7510  * management command, and in the host controller by clearing the doorbell
7511  * register. There can be a race where the controller sends the command to the
7512  * device while the abort is issued. To avoid that, first issue UFS_QUERY_TASK
7513  * to check whether the command was really issued and only then try to abort it.
7514  *
7515  * Return: zero on success, non-zero on failure.
7516  */
7517 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7518 {
7519         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7520         int err = 0;
7521         int poll_cnt;
7522         u8 resp = 0xF;
7523         u32 reg;
7524
7525         for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7526                 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7527                                 UFS_QUERY_TASK, &resp);
7528                 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7529                         /* cmd pending in the device */
7530                         dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7531                                 __func__, tag);
7532                         break;
7533                 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7534                         /*
7535                          * cmd not pending in the device, check if it is
7536                          * in transition.
7537                          */
7538                         dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
7539                                 __func__, tag);
7540                         if (is_mcq_enabled(hba)) {
7541                                 /* MCQ mode */
7542                                 if (ufshcd_cmd_inflight(lrbp->cmd)) {
7543                                         /* sleep for max. 200us, the same delay as in SDB mode */
7544                                         usleep_range(100, 200);
7545                                         continue;
7546                                 }
7547                                 /* command completed already */
7548                                 dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
7549                                         __func__, tag);
7550                                 goto out;
7551                         }
7552
7553                         /* Single Doorbell Mode */
7554                         reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7555                         if (reg & (1 << tag)) {
7556                                 /* sleep for max. 200us to stabilize */
7557                                 usleep_range(100, 200);
7558                                 continue;
7559                         }
7560                         /* command completed already */
7561                         dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
7562                                 __func__, tag);
7563                         goto out;
7564                 } else {
7565                         dev_err(hba->dev,
7566                                 "%s: no response from device. tag = %d, err %d\n",
7567                                 __func__, tag, err);
7568                         if (!err)
7569                                 err = resp; /* service response error */
7570                         goto out;
7571                 }
7572         }
7573
7574         if (!poll_cnt) {
7575                 err = -EBUSY;
7576                 goto out;
7577         }
7578
7579         err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7580                         UFS_ABORT_TASK, &resp);
7581         if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7582                 if (!err) {
7583                         err = resp; /* service response error */
7584                         dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7585                                 __func__, tag, err);
7586                 }
7587                 goto out;
7588         }
7589
7590         err = ufshcd_clear_cmd(hba, tag);
7591         if (err)
7592                 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7593                         __func__, tag, err);
7594
7595 out:
7596         return err;
7597 }
7598
7599 /**
7600  * ufshcd_abort - scsi host template eh_abort_handler callback
7601  * @cmd: SCSI command pointer
7602  *
7603  * Return: SUCCESS or FAILED.
7604  */
7605 static int ufshcd_abort(struct scsi_cmnd *cmd)
7606 {
7607         struct Scsi_Host *host = cmd->device->host;
7608         struct ufs_hba *hba = shost_priv(host);
7609         int tag = scsi_cmd_to_rq(cmd)->tag;
7610         struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7611         unsigned long flags;
7612         int err = FAILED;
7613         bool outstanding;
7614         u32 reg;
7615
7616         ufshcd_hold(hba);
7617
7618         if (!is_mcq_enabled(hba)) {
7619                 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7620                 if (!test_bit(tag, &hba->outstanding_reqs)) {
7621                         /* If command is already aborted/completed, return FAILED. */
7622                         dev_err(hba->dev,
7623                                 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7624                                 __func__, tag, hba->outstanding_reqs, reg);
7625                         goto release;
7626                 }
7627         }
7628
7629         /* Print Transfer Request of aborted task */
7630         dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7631
7632         /*
7633          * Print detailed info about aborted request.
7634          * As more than one request might get aborted at the same time,
7635          * print full information only for the first aborted request in order
7636          * to reduce repeated printouts. For other aborted requests only print
7637          * basic details.
7638          */
7639         scsi_print_command(cmd);
7640         if (!hba->req_abort_count) {
7641                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7642                 ufshcd_print_evt_hist(hba);
7643                 ufshcd_print_host_state(hba);
7644                 ufshcd_print_pwr_info(hba);
7645                 ufshcd_print_tr(hba, tag, true);
7646         } else {
7647                 ufshcd_print_tr(hba, tag, false);
7648         }
7649         hba->req_abort_count++;
7650
7651         if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
7652                 /* only execute this code in single doorbell mode */
7653                 dev_err(hba->dev,
7654                 "%s: cmd was completed, but without a notifying intr, tag = %d",
7655                 __func__, tag);
7656                 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7657                 goto release;
7658         }
7659
7660         /*
7661          * A task abort to the device W-LUN is illegal. When this command
7662          * fails because of that spec violation, the next step of SCSI error
7663          * handling would be to send an LU reset which, again, is a spec
7664          * violation. To avoid these unnecessary/illegal steps, first we clean
7665          * up the lrb taken by this cmd and re-set it in outstanding_reqs,
7666          * then queue the eh_work and bail.
7667          */
7668         if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7669                 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7670
7671                 spin_lock_irqsave(host->host_lock, flags);
7672                 hba->force_reset = true;
7673                 ufshcd_schedule_eh_work(hba);
7674                 spin_unlock_irqrestore(host->host_lock, flags);
7675                 goto release;
7676         }
7677
7678         if (is_mcq_enabled(hba)) {
7679                 /* MCQ mode. Branch off to handle abort for mcq mode */
7680                 err = ufshcd_mcq_abort(cmd);
7681                 goto release;
7682         }
7683
7684         /* Skip task abort in case previous aborts failed and report failure */
7685         if (lrbp->req_abort_skip) {
7686                 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7687                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7688                 goto release;
7689         }
7690
7691         err = ufshcd_try_to_abort_task(hba, tag);
7692         if (err) {
7693                 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7694                 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7695                 err = FAILED;
7696                 goto release;
7697         }
7698
7699         /*
7700          * Clear the corresponding bit from outstanding_reqs since the command
7701          * has been aborted successfully.
7702          */
7703         spin_lock_irqsave(&hba->outstanding_lock, flags);
7704         outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7705         spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7706
7707         if (outstanding)
7708                 ufshcd_release_scsi_cmd(hba, lrbp);
7709
7710         err = SUCCESS;
7711
7712 release:
7713         /* Matches the ufshcd_hold() call at the start of this function. */
7714         ufshcd_release(hba);
7715         return err;
7716 }
7717
7718 /**
7719  * ufshcd_host_reset_and_restore - reset and restore host controller
7720  * @hba: per-adapter instance
7721  *
7722  * Note that host controller reset may issue DME_RESET to
7723  * local and remote (device) Uni-Pro stack and the attributes
7724  * are reset to default state.
7725  *
7726  * Return: zero on success, non-zero on failure.
7727  */
7728 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7729 {
7730         int err;
7731
7732         /*
7733          * Stop the host controller and complete the requests
7734          * cleared by h/w
7735          */
7736         ufshcd_hba_stop(hba);
7737         hba->silence_err_logs = true;
7738         ufshcd_complete_requests(hba, true);
7739         hba->silence_err_logs = false;
7740
7741         /* scale up clocks to max frequency before full reinitialization */
7742         ufshcd_scale_clks(hba, ULONG_MAX, true);
7743
7744         err = ufshcd_hba_enable(hba);
7745
7746         /* Establish the link again and restore the device */
7747         if (!err)
7748                 err = ufshcd_probe_hba(hba, false);
7749
7750         if (err)
7751                 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7752         ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7753         return err;
7754 }
7755
7756 /**
7757  * ufshcd_reset_and_restore - reset and re-initialize host/device
7758  * @hba: per-adapter instance
7759  *
7760  * Reset and recover device, host and re-establish link. This
7761  * is helpful to recover the communication in fatal error conditions.
7762  *
7763  * Return: zero on success, non-zero on failure.
7764  */
7765 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7766 {
7767         u32 saved_err = 0;
7768         u32 saved_uic_err = 0;
7769         int err = 0;
7770         unsigned long flags;
7771         int retries = MAX_HOST_RESET_RETRIES;
7772
7773         spin_lock_irqsave(hba->host->host_lock, flags);
7774         do {
7775                 /*
7776                  * This is a fresh start, cache and clear saved error first,
7777                  * in case new error generated during reset and restore.
7778                  */
7779                 saved_err |= hba->saved_err;
7780                 saved_uic_err |= hba->saved_uic_err;
7781                 hba->saved_err = 0;
7782                 hba->saved_uic_err = 0;
7783                 hba->force_reset = false;
7784                 hba->ufshcd_state = UFSHCD_STATE_RESET;
7785                 spin_unlock_irqrestore(hba->host->host_lock, flags);
7786
7787                 /* Reset the attached device */
7788                 ufshcd_device_reset(hba);
7789
7790                 err = ufshcd_host_reset_and_restore(hba);
7791
7792                 spin_lock_irqsave(hba->host->host_lock, flags);
7793                 if (err)
7794                         continue;
7795                 /* Do not exit unless operational or dead */
7796                 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7797                     hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7798                     hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7799                         err = -EAGAIN;
7800         } while (err && --retries);
7801
7802         /*
7803          * Inform the SCSI mid-layer that we did a reset and allow it to
7804          * handle Unit Attention properly.
7805          */
7806         scsi_report_bus_reset(hba->host, 0);
7807         if (err) {
7808                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7809                 hba->saved_err |= saved_err;
7810                 hba->saved_uic_err |= saved_uic_err;
7811         }
7812         spin_unlock_irqrestore(hba->host->host_lock, flags);
7813
7814         return err;
7815 }
7816
7817 /**
7818  * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7819  * @cmd: SCSI command pointer
7820  *
7821  * Return: SUCCESS or FAILED.
7822  */
7823 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7824 {
7825         int err = SUCCESS;
7826         unsigned long flags;
7827         struct ufs_hba *hba;
7828
7829         hba = shost_priv(cmd->device->host);
7830
7831         /*
7832          * If runtime PM sent an SSU and got a timeout, scsi_error_handler gets
7833          * stuck in this function waiting for flush_work(&hba->eh_work), while
7834          * ufshcd_err_handler (eh_work) is stuck waiting for runtime PM. Do
7835          * ufshcd_link_recovery instead of eh_work to prevent the deadlock.
7836          */
7837         if (hba->pm_op_in_progress) {
7838                 if (ufshcd_link_recovery(hba))
7839                         err = FAILED;
7840
7841                 return err;
7842         }
7843
7844         spin_lock_irqsave(hba->host->host_lock, flags);
7845         hba->force_reset = true;
7846         ufshcd_schedule_eh_work(hba);
7847         dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7848         spin_unlock_irqrestore(hba->host->host_lock, flags);
7849
7850         flush_work(&hba->eh_work);
7851
7852         spin_lock_irqsave(hba->host->host_lock, flags);
7853         if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7854                 err = FAILED;
7855         spin_unlock_irqrestore(hba->host->host_lock, flags);
7856
7857         return err;
7858 }
7859
7860 /**
7861  * ufshcd_get_max_icc_level - calculate the ICC level
7862  * @sup_curr_uA: max. current supported by the regulator
7863  * @start_scan: row at the desc table to start scan from
7864  * @buff: power descriptor buffer
7865  *
7866  * Return: calculated max ICC level for specific regulator.
7867  */
7868 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7869                                     const char *buff)
7870 {
7871         int i;
7872         int curr_uA;
7873         u16 data;
7874         u16 unit;
7875
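        /*
         * Scan the ICC level entries from the highest requested level down to
         * zero. Each 16-bit entry encodes a unit and a value; normalize the
         * value to microamps before comparing it against the regulator limit.
         */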
7876         for (i = start_scan; i >= 0; i--) {
7877                 data = get_unaligned_be16(&buff[2 * i]);
7878                 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7879                                                 ATTR_ICC_LVL_UNIT_OFFSET;
7880                 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7881                 switch (unit) {
7882                 case UFSHCD_NANO_AMP:
7883                         curr_uA = curr_uA / 1000;
7884                         break;
7885                 case UFSHCD_MILI_AMP:
7886                         curr_uA = curr_uA * 1000;
7887                         break;
7888                 case UFSHCD_AMP:
7889                         curr_uA = curr_uA * 1000 * 1000;
7890                         break;
7891                 case UFSHCD_MICRO_AMP:
7892                 default:
7893                         break;
7894                 }
7895                 if (sup_curr_uA >= curr_uA)
7896                         break;
7897         }
7898         if (i < 0) {
7899                 i = 0;
7900                 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7901         }
7902
7903         return (u32)i;
7904 }
7905
7906 /**
7907  * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7908  * @hba: per-adapter instance
7909  * @desc_buf: power descriptor buffer to extract ICC levels from.
7910  *
7911  * Return: calculated ICC level, or 0 in case the regulators are not
7912  * initialized.
7913  */
7914 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7915                                                 const u8 *desc_buf)
7916 {
7917         u32 icc_level = 0;
7918
7919         if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7920                                                 !hba->vreg_info.vccq2) {
7921                 /*
7922                  * Use dev_dbg to avoid messages during runtime PM: otherwise
7923                  * messages written back to storage by user space would cause a
7924                  * runtime resume, which would cause more messages and so on,
7925                  * in a never-ending cycle.
7926                  */
7927                 dev_dbg(hba->dev,
7928                         "%s: Regulator capability was not set, actvIccLevel=%d",
7929                                                         __func__, icc_level);
7930                 goto out;
7931         }
7932
7933         if (hba->vreg_info.vcc->max_uA)
7934                 icc_level = ufshcd_get_max_icc_level(
7935                                 hba->vreg_info.vcc->max_uA,
7936                                 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7937                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7938
7939         if (hba->vreg_info.vccq->max_uA)
7940                 icc_level = ufshcd_get_max_icc_level(
7941                                 hba->vreg_info.vccq->max_uA,
7942                                 icc_level,
7943                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7944
7945         if (hba->vreg_info.vccq2->max_uA)
7946                 icc_level = ufshcd_get_max_icc_level(
7947                                 hba->vreg_info.vccq2->max_uA,
7948                                 icc_level,
7949                                 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7950 out:
7951         return icc_level;
7952 }
7953
7954 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7955 {
7956         int ret;
7957         u8 *desc_buf;
7958         u32 icc_level;
7959
7960         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7961         if (!desc_buf)
7962                 return;
7963
7964         ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7965                                      desc_buf, QUERY_DESC_MAX_SIZE);
7966         if (ret) {
7967                 dev_err(hba->dev,
7968                         "%s: Failed reading power descriptor ret = %d",
7969                         __func__, ret);
7970                 goto out;
7971         }
7972
7973         icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
7974         dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7975
7976         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7977                 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7978
7979         if (ret)
7980                 dev_err(hba->dev,
7981                         "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7982                         __func__, icc_level, ret);
7983
7984 out:
7985         kfree(desc_buf);
7986 }
7987
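/*
 * Enable block layer runtime PM for a well known logical unit and apply the
 * default autosuspend delay when rpm_autosuspend is enabled for the device.
 */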
7988 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
7989 {
7990         scsi_autopm_get_device(sdev);
7991         blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
7992         if (sdev->rpm_autosuspend)
7993                 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
7994                                                  RPM_AUTOSUSPEND_DELAY_MS);
7995         scsi_autopm_put_device(sdev);
7996 }
7997
7998 /**
7999  * ufshcd_scsi_add_wlus - Adds required W-LUs
8000  * @hba: per-adapter instance
8001  *
8002  * UFS device specification requires the UFS devices to support 4 well known
8003  * logical units:
8004  *      "REPORT_LUNS" (address: 01h)
8005  *      "UFS Device" (address: 50h)
8006  *      "RPMB" (address: 44h)
8007  *      "BOOT" (address: 30h)
8008  * The UFS device's power management needs to be controlled by the "POWER
8009  * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
8010  * condition" field only takes effect when it is sent to the "UFS device" well
8011  * known logical unit, hence we require a scsi_device instance to represent
8012  * this logical unit so that the UFS host driver can send it the SSU command.
8013  *
8014  * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
8015  * Block) LU so user space process can control this LU. User space may also
8016  * want to have access to BOOT LU.
8017  *
8018  * This function adds a scsi device instance for each of the well known LUs
8019  * (except the "REPORT LUNS" LU).
8020  *
8021  * Return: zero on success (all required W-LUs are added successfully),
8022  * non-zero error value on failure (if failed to add any of the required W-LU).
8023  */
8024 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
8025 {
8026         int ret = 0;
8027         struct scsi_device *sdev_boot, *sdev_rpmb;
8028
8029         hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
8030                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
8031         if (IS_ERR(hba->ufs_device_wlun)) {
8032                 ret = PTR_ERR(hba->ufs_device_wlun);
8033                 hba->ufs_device_wlun = NULL;
8034                 goto out;
8035         }
8036         scsi_device_put(hba->ufs_device_wlun);
8037
8038         sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
8039                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
8040         if (IS_ERR(sdev_rpmb)) {
8041                 ret = PTR_ERR(sdev_rpmb);
8042                 goto remove_ufs_device_wlun;
8043         }
8044         ufshcd_blk_pm_runtime_init(sdev_rpmb);
8045         scsi_device_put(sdev_rpmb);
8046
8047         sdev_boot = __scsi_add_device(hba->host, 0, 0,
8048                 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
8049         if (IS_ERR(sdev_boot)) {
8050                 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
8051         } else {
8052                 ufshcd_blk_pm_runtime_init(sdev_boot);
8053                 scsi_device_put(sdev_boot);
8054         }
8055         goto out;
8056
8057 remove_ufs_device_wlun:
8058         scsi_remove_device(hba->ufs_device_wlun);
8059 out:
8060         return ret;
8061 }
8062
8063 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8064 {
8065         struct ufs_dev_info *dev_info = &hba->dev_info;
8066         u8 lun;
8067         u32 d_lu_wb_buf_alloc;
8068         u32 ext_ufs_feature;
8069
8070         if (!ufshcd_is_wb_allowed(hba))
8071                 return;
8072
8073         /*
8074          * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8075          * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8076          * enabled
8077          */
8078         if (!(dev_info->wspecversion >= 0x310 ||
8079               dev_info->wspecversion == 0x220 ||
8080              (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8081                 goto wb_disabled;
8082
8083         ext_ufs_feature = get_unaligned_be32(desc_buf +
8084                                         DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8085
8086         if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8087                 goto wb_disabled;
8088
8089         /*
8090          * WB may be supported but not configured during provisioning. The spec
8091          * says that, in dedicated WB buffer mode, at most one LUN would have a
8092          * WB buffer configured.
8093          */
8094         dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8095
8096         dev_info->b_presrv_uspc_en =
8097                 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8098
8099         if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8100                 if (!get_unaligned_be32(desc_buf +
8101                                    DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8102                         goto wb_disabled;
8103         } else {
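                /*
                 * Dedicated buffer mode: pick the first LUN that has
                 * WriteBooster buffer allocation units assigned to it.
                 */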
8104                 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8105                         d_lu_wb_buf_alloc = 0;
8106                         ufshcd_read_unit_desc_param(hba,
8107                                         lun,
8108                                         UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8109                                         (u8 *)&d_lu_wb_buf_alloc,
8110                                         sizeof(d_lu_wb_buf_alloc));
8111                         if (d_lu_wb_buf_alloc) {
8112                                 dev_info->wb_dedicated_lu = lun;
8113                                 break;
8114                         }
8115                 }
8116
8117                 if (!d_lu_wb_buf_alloc)
8118                         goto wb_disabled;
8119         }
8120
8121         if (!ufshcd_is_wb_buf_lifetime_available(hba))
8122                 goto wb_disabled;
8123
8124         return;
8125
8126 wb_disabled:
8127         hba->caps &= ~UFSHCD_CAP_WB_EN;
8128 }
8129
8130 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8131 {
8132         struct ufs_dev_info *dev_info = &hba->dev_info;
8133         u32 ext_ufs_feature;
8134         u8 mask = 0;
8135
8136         if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8137                 return;
8138
8139         ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8140
8141         if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8142                 mask |= MASK_EE_TOO_LOW_TEMP;
8143
8144         if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8145                 mask |= MASK_EE_TOO_HIGH_TEMP;
8146
8147         if (mask) {
8148                 ufshcd_enable_ee(hba, mask);
8149                 ufs_hwmon_probe(hba, mask);
8150         }
8151 }
8152
8153 static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
8154 {
8155         struct ufs_dev_info *dev_info = &hba->dev_info;
8156         u32 ext_ufs_feature;
8157         u32 ext_iid_en = 0;
8158         int err;
8159
8160         /* Only UFS-4.0 and above may support EXT_IID */
8161         if (dev_info->wspecversion < 0x400)
8162                 goto out;
8163
8164         ext_ufs_feature = get_unaligned_be32(desc_buf +
8165                                      DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8166         if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
8167                 goto out;
8168
8169         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8170                                       QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
8171         if (err)
8172                 dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
8173
8174 out:
8175         dev_info->b_ext_iid_en = ext_iid_en;
8176 }
8177
8178 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8179                              const struct ufs_dev_quirk *fixups)
8180 {
8181         const struct ufs_dev_quirk *f;
8182         struct ufs_dev_info *dev_info = &hba->dev_info;
8183
8184         if (!fixups)
8185                 return;
8186
8187         for (f = fixups; f->quirk; f++) {
8188                 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8189                      f->wmanufacturerid == UFS_ANY_VENDOR) &&
8190                      ((dev_info->model &&
8191                        STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8192                       !strcmp(f->model, UFS_ANY_MODEL)))
8193                         hba->dev_quirks |= f->quirk;
8194         }
8195 }
8196 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
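
/*
 * Illustrative sketch only (not an in-tree table): a host driver could pass a
 * fixup list like the one below from its ->fixup_dev_quirks() vop. The quirk
 * value is any combination of the UFS_DEVICE_QUIRK_* flags matched above; an
 * all-zero entry terminates the list.
 *
 *	static const struct ufs_dev_quirk example_fixups[] = {
 *		{ .wmanufacturerid = UFS_ANY_VENDOR,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
 *		{}
 *	};
 *
 *	ufshcd_fixup_dev_quirks(hba, example_fixups);
 */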
8197
8198 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8199 {
8200         /* fix by general quirk table */
8201         ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8202
8203         /* allow vendors to fix quirks */
8204         ufshcd_vops_fixup_dev_quirks(hba);
8205 }
8206
8207 static void ufshcd_update_rtc(struct ufs_hba *hba)
8208 {
8209         struct timespec64 ts64;
8210         int err;
8211         u32 val;
8212
8213         ktime_get_real_ts64(&ts64);
8214
8215         if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
8216                 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
8217                 return;
8218         }
8219
8220         /*
8221          * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
8222          * 2146 is required, it is recommended to choose the relative RTC mode.
8223          */
8224         val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
8225
8226         ufshcd_rpm_get_sync(hba);
8227         err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
8228                                 0, 0, &val);
8229         ufshcd_rpm_put_sync(hba);
8230
8231         if (err)
8232                 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
8233         else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
8234                 hba->dev_info.rtc_time_baseline = ts64.tv_sec;
8235 }
8236
8237 static void ufshcd_rtc_work(struct work_struct *work)
8238 {
8239         struct ufs_hba *hba;
8240
8241         hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
8242
8243          /* Update RTC only when there are no requests in progress and UFSHCI is operational */
8244         if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
8245                 ufshcd_update_rtc(hba);
8246
8247         if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
8248                 schedule_delayed_work(&hba->ufs_rtc_update_work,
8249                                       msecs_to_jiffies(hba->dev_info.rtc_update_period));
8250 }
8251
8252 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
8253 {
8254         u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
8255         struct ufs_dev_info *dev_info = &hba->dev_info;
8256
8257         if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
8258                 dev_info->rtc_type = UFS_RTC_ABSOLUTE;
8259
8260                 /*
8261                  * Linux measures time as the number of seconds elapsed since 00:00:00 UTC on
8262                  * January 1, 1970, whereas the UFS absolute RTC counts from January 1st 2010
8263                  * 00:00, so the ABS baseline needs to be adjusted accordingly.
8264                  */
8265                 dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
8266                                                         mktime64(1970, 1, 1, 0, 0, 0);
8267         } else {
8268                 dev_info->rtc_type = UFS_RTC_RELATIVE;
8269                 dev_info->rtc_time_baseline = 0;
8270         }
8271
8272         /*
8273          * We ignore the TIME_PERIOD defined in wPeriodicRTCUpdate because the spec does not clearly
8274          * state how to calculate the update period for each time unit. Hence the periodic RTC
8275          * update work is disabled; the user can configure it via a sysfs node as needed.
8276          */
8277         dev_info->rtc_update_period = 0;
8278 }
8279
8280 static int ufs_get_device_desc(struct ufs_hba *hba)
8281 {
8282         int err;
8283         u8 model_index;
8284         u8 *desc_buf;
8285         struct ufs_dev_info *dev_info = &hba->dev_info;
8286
8287         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8288         if (!desc_buf) {
8289                 err = -ENOMEM;
8290                 goto out;
8291         }
8292
8293         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8294                                      QUERY_DESC_MAX_SIZE);
8295         if (err) {
8296                 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8297                         __func__, err);
8298                 goto out;
8299         }
8300
8301         /*
8302          * getting vendor (manufacturerID) and Bank Index in big endian
8303          * format
8304          */
8305         dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8306                                      desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8307
8308         /* getting Specification Version in big endian format */
8309         dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8310                                       desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8311         dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8312
8313         model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8314
8315         err = ufshcd_read_string_desc(hba, model_index,
8316                                       &dev_info->model, SD_ASCII_STD);
8317         if (err < 0) {
8318                 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8319                         __func__, err);
8320                 goto out;
8321         }
8322
8323         hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8324                 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8325
8326         ufs_fixup_device_setup(hba);
8327
8328         ufshcd_wb_probe(hba, desc_buf);
8329
8330         ufshcd_temp_notif_probe(hba, desc_buf);
8331
8332         ufs_init_rtc(hba, desc_buf);
8333
8334         if (hba->ext_iid_sup)
8335                 ufshcd_ext_iid_probe(hba, desc_buf);
8336
8337         /*
8338          * ufshcd_read_string_desc() returns the size of the string on success,
8339          * so reset the error value here
8340          */
8341         err = 0;
8342
8343 out:
8344         kfree(desc_buf);
8345         return err;
8346 }
8347
8348 static void ufs_put_device_desc(struct ufs_hba *hba)
8349 {
8350         struct ufs_dev_info *dev_info = &hba->dev_info;
8351
8352         kfree(dev_info->model);
8353         dev_info->model = NULL;
8354 }
8355
8356 /**
8357  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
8358  * @hba: per-adapter instance
8359  *
8360  * The PA_TActivate parameter can be tuned manually if the UniPro version is
8361  * less than 1.61. PA_TActivate needs to be greater than or equal to the peer
8362  * M-PHY's RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can
8363  * help reduce the hibern8 exit latency.
8364  *
8365  * Return: zero on success, non-zero error value on failure.
8366  */
8367 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
8368 {
8369         int ret = 0;
8370         u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
8371
8372         ret = ufshcd_dme_peer_get(hba,
8373                                   UIC_ARG_MIB_SEL(
8374                                         RX_MIN_ACTIVATETIME_CAPABILITY,
8375                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8376                                   &peer_rx_min_activatetime);
8377         if (ret)
8378                 goto out;
8379
8380         /* make sure proper unit conversion is applied */
8381         tuned_pa_tactivate =
8382                 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
8383                  / PA_TACTIVATE_TIME_UNIT_US);
8384         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8385                              tuned_pa_tactivate);
8386
8387 out:
8388         return ret;
8389 }
8390
8391 /**
8392  * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
8393  * @hba: per-adapter instance
8394  *
8395  * The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
8396  * less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
8397  * TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
8398  * This optimal value can help reduce the hibern8 exit latency.
8399  *
8400  * Return: zero on success, non-zero error value on failure.
8401  */
8402 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
8403 {
8404         int ret = 0;
8405         u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
8406         u32 max_hibern8_time, tuned_pa_hibern8time;
8407
8408         ret = ufshcd_dme_get(hba,
8409                              UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
8410                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
8411                                   &local_tx_hibern8_time_cap);
8412         if (ret)
8413                 goto out;
8414
8415         ret = ufshcd_dme_peer_get(hba,
8416                                   UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
8417                                         UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
8418                                   &peer_rx_hibern8_time_cap);
8419         if (ret)
8420                 goto out;
8421
8422         max_hibern8_time = max(local_tx_hibern8_time_cap,
8423                                peer_rx_hibern8_time_cap);
8424         /* make sure proper unit conversion is applied */
8425         tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
8426                                 / PA_HIBERN8_TIME_UNIT_US);
8427         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
8428                              tuned_pa_hibern8time);
8429 out:
8430         return ret;
8431 }
8432
8433 /**
8434  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8435  * less than device PA_TACTIVATE time.
8436  * @hba: per-adapter instance
8437  *
8438  * Some UFS devices require host PA_TACTIVATE to be lower than device
8439  * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
8440  * enabled for such devices.
8441  *
8442  * Return: zero on success, non-zero error value on failure.
8443  */
8444 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8445 {
8446         int ret = 0;
8447         u32 granularity, peer_granularity;
8448         u32 pa_tactivate, peer_pa_tactivate;
8449         u32 pa_tactivate_us, peer_pa_tactivate_us;
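        /*
         * Granularity step sizes in microseconds; the table is indexed by
         * PA_GRANULARITY - 1 for both the host and the device.
         */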
8450         static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8451
8452         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8453                                   &granularity);
8454         if (ret)
8455                 goto out;
8456
8457         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8458                                   &peer_granularity);
8459         if (ret)
8460                 goto out;
8461
8462         if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8463             (granularity > PA_GRANULARITY_MAX_VAL)) {
8464                 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8465                         __func__, granularity);
8466                 return -EINVAL;
8467         }
8468
8469         if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8470             (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8471                 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8472                         __func__, peer_granularity);
8473                 return -EINVAL;
8474         }
8475
8476         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8477         if (ret)
8478                 goto out;
8479
8480         ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8481                                   &peer_pa_tactivate);
8482         if (ret)
8483                 goto out;
8484
8485         pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8486         peer_pa_tactivate_us = peer_pa_tactivate *
8487                              gran_to_us_table[peer_granularity - 1];
8488
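        /*
         * If the host's PA_TACTIVATE time is not already smaller than the
         * device's, rescale the host's time into the device's granularity
         * units and add one step so that the device ends up with the larger
         * PA_TACTIVATE value.
         */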
8489         if (pa_tactivate_us >= peer_pa_tactivate_us) {
8490                 u32 new_peer_pa_tactivate;
8491
8492                 new_peer_pa_tactivate = pa_tactivate_us /
8493                                       gran_to_us_table[peer_granularity - 1];
8494                 new_peer_pa_tactivate++;
8495                 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8496                                           new_peer_pa_tactivate);
8497         }
8498
8499 out:
8500         return ret;
8501 }
8502
8503 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8504 {
8505         if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
8506                 ufshcd_tune_pa_tactivate(hba);
8507                 ufshcd_tune_pa_hibern8time(hba);
8508         }
8509
8510         ufshcd_vops_apply_dev_quirks(hba);
8511
8512         if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8513                 /* set 1ms timeout for PA_TACTIVATE */
8514                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8515
8516         if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8517                 ufshcd_quirk_tune_host_pa_tactivate(hba);
8518 }
8519
8520 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8521 {
8522         hba->ufs_stats.hibern8_exit_cnt = 0;
8523         hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8524         hba->req_abort_count = 0;
8525 }
8526
8527 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8528 {
8529         int err;
8530         u8 *desc_buf;
8531
8532         desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8533         if (!desc_buf) {
8534                 err = -ENOMEM;
8535                 goto out;
8536         }
8537
8538         err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8539                                      desc_buf, QUERY_DESC_MAX_SIZE);
8540         if (err) {
8541                 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8542                                 __func__, err);
8543                 goto out;
8544         }
8545
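        /* bMaxNumberLU in the geometry descriptor: 1 => 32 LUs, 0 => 8 LUs */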
8546         if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8547                 hba->dev_info.max_lu_supported = 32;
8548         else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8549                 hba->dev_info.max_lu_supported = 8;
8550
8551 out:
8552         kfree(desc_buf);
8553         return err;
8554 }
8555
8556 struct ufs_ref_clk {
8557         unsigned long freq_hz;
8558         enum ufs_ref_clk_freq val;
8559 };
8560
8561 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8562         {19200000, REF_CLK_FREQ_19_2_MHZ},
8563         {26000000, REF_CLK_FREQ_26_MHZ},
8564         {38400000, REF_CLK_FREQ_38_4_MHZ},
8565         {52000000, REF_CLK_FREQ_52_MHZ},
8566         {0, REF_CLK_FREQ_INVAL},
8567 };
8568
8569 static enum ufs_ref_clk_freq
8570 ufs_get_bref_clk_from_hz(unsigned long freq)
8571 {
8572         int i;
8573
8574         for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8575                 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8576                         return ufs_ref_clk_freqs[i].val;
8577
8578         return REF_CLK_FREQ_INVAL;
8579 }
8580
8581 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8582 {
8583         unsigned long freq;
8584
8585         freq = clk_get_rate(refclk);
8586
8587         hba->dev_ref_clk_freq =
8588                 ufs_get_bref_clk_from_hz(freq);
8589
8590         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8591                 dev_err(hba->dev,
8592                 "invalid ref_clk setting = %ld\n", freq);
8593 }
8594
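/*
 * Read the device's bRefClkFreq attribute and rewrite it if it does not match
 * the reference clock frequency the host parsed for this platform.
 */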
8595 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8596 {
8597         int err;
8598         u32 ref_clk;
8599         u32 freq = hba->dev_ref_clk_freq;
8600
8601         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8602                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8603
8604         if (err) {
8605                 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8606                         err);
8607                 goto out;
8608         }
8609
8610         if (ref_clk == freq)
8611                 goto out; /* nothing to update */
8612
8613         err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8614                         QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8615
8616         if (err) {
8617                 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8618                         ufs_ref_clk_freqs[freq].freq_hz);
8619                 goto out;
8620         }
8621
8622         dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8623                         ufs_ref_clk_freqs[freq].freq_hz);
8624
8625 out:
8626         return err;
8627 }
8628
8629 static int ufshcd_device_params_init(struct ufs_hba *hba)
8630 {
8631         bool flag;
8632         int ret;
8633
8634         /* Init UFS geometry descriptor related parameters */
8635         ret = ufshcd_device_geo_params_init(hba);
8636         if (ret)
8637                 goto out;
8638
8639         /* Check and apply UFS device quirks */
8640         ret = ufs_get_device_desc(hba);
8641         if (ret) {
8642                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8643                         __func__, ret);
8644                 goto out;
8645         }
8646
8647         ufshcd_get_ref_clk_gating_wait(hba);
8648
8649         if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8650                         QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8651                 hba->dev_info.f_power_on_wp_en = flag;
8652
8653         /* Probe maximum power mode co-supported by both UFS host and device */
8654         if (ufshcd_get_max_pwr_mode(hba))
8655                 dev_err(hba->dev,
8656                         "%s: Failed getting max supported power mode\n",
8657                         __func__);
8658 out:
8659         return ret;
8660 }
8661
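/*
 * For UFS 4.0+ devices (wSpecVersion >= 0x400), write the current time from
 * ktime_get_real_ns() into the device timestamp attribute via a raw
 * WRITE_ATTR query.
 */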
8662 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8663 {
8664         int err;
8665         struct ufs_query_req *request = NULL;
8666         struct ufs_query_res *response = NULL;
8667         struct ufs_dev_info *dev_info = &hba->dev_info;
8668         struct utp_upiu_query_v4_0 *upiu_data;
8669
8670         if (dev_info->wspecversion < 0x400)
8671                 return;
8672
8673         ufshcd_hold(hba);
8674
8675         mutex_lock(&hba->dev_cmd.lock);
8676
8677         ufshcd_init_query(hba, &request, &response,
8678                           UPIU_QUERY_OPCODE_WRITE_ATTR,
8679                           QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8680
8681         request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8682
8683         upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8684
8685         put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8686
8687         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8688
8689         if (err)
8690                 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8691                         __func__, err);
8692
8693         mutex_unlock(&hba->dev_cmd.lock);
8694         ufshcd_release(hba);
8695 }
8696
8697 /**
8698  * ufshcd_add_lus - probe and add UFS logical units
8699  * @hba: per-adapter instance
8700  *
8701  * Return: 0 upon success; < 0 upon failure.
8702  */
8703 static int ufshcd_add_lus(struct ufs_hba *hba)
8704 {
8705         int ret;
8706
8707         /* Add required well known logical units to scsi mid layer */
8708         ret = ufshcd_scsi_add_wlus(hba);
8709         if (ret)
8710                 goto out;
8711
8712         /* Initialize devfreq after UFS device is detected */
8713         if (ufshcd_is_clkscaling_supported(hba)) {
8714                 memcpy(&hba->clk_scaling.saved_pwr_info,
8715                         &hba->pwr_info,
8716                         sizeof(struct ufs_pa_layer_attr));
8717                 hba->clk_scaling.is_allowed = true;
8718
8719                 ret = ufshcd_devfreq_init(hba);
8720                 if (ret)
8721                         goto out;
8722
8723                 hba->clk_scaling.is_enabled = true;
8724                 ufshcd_init_clk_scaling_sysfs(hba);
8725         }
8726
8727         ufs_bsg_probe(hba);
8728         scsi_scan_host(hba->host);
8729
8730 out:
8731         return ret;
8732 }
8733
8734 /* SDB - Single Doorbell */
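/*
 * Free the command descriptors, the transfer request descriptor list and the
 * lrb array that were sized for the SDB queue depth, so that they can be
 * reallocated with the MCQ queue depth.
 */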
8735 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8736 {
8737         size_t ucdl_size, utrdl_size;
8738
8739         ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8740         dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8741                            hba->ucdl_dma_addr);
8742
8743         utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8744         dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8745                            hba->utrdl_dma_addr);
8746
8747         devm_kfree(hba->dev, hba->lrb);
8748 }
8749
8750 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8751 {
8752         int ret;
8753         int old_nutrs = hba->nutrs;
8754
8755         ret = ufshcd_mcq_decide_queue_depth(hba);
8756         if (ret < 0)
8757                 return ret;
8758
8759         hba->nutrs = ret;
8760         ret = ufshcd_mcq_init(hba);
8761         if (ret)
8762                 goto err;
8763
8764         /*
8765          * Previously allocated memory for nutrs may not be enough in MCQ mode.
8766          * Number of supported tags in MCQ mode may be larger than in SDB mode.
8767          */
8768         if (hba->nutrs != old_nutrs) {
8769                 ufshcd_release_sdb_queue(hba, old_nutrs);
8770                 ret = ufshcd_memory_alloc(hba);
8771                 if (ret)
8772                         goto err;
8773                 ufshcd_host_memory_configure(hba);
8774         }
8775
8776         ret = ufshcd_mcq_memory_alloc(hba);
8777         if (ret)
8778                 goto err;
8779
8780         return 0;
8781 err:
8782         hba->nutrs = old_nutrs;
8783         return ret;
8784 }
8785
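/*
 * Configure ESI if the host variant supports it, enable the MCQ interrupt
 * sources, make the MCQ queues operational, program the maximum number of
 * active commands and switch the controller into MCQ mode via REG_UFS_MEM_CFG.
 */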
8786 static void ufshcd_config_mcq(struct ufs_hba *hba)
8787 {
8788         int ret;
8789         u32 intrs;
8790
8791         ret = ufshcd_mcq_vops_config_esi(hba);
8792         dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8793
8794         intrs = UFSHCD_ENABLE_MCQ_INTRS;
8795         if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8796                 intrs &= ~MCQ_CQ_EVENT_STATUS;
8797         ufshcd_enable_intr(hba, intrs);
8798         ufshcd_mcq_make_queues_operational(hba);
8799         ufshcd_mcq_config_mac(hba, hba->nutrs);
8800
8801         hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8802         hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8803
8804         /* Select MCQ mode */
8805         ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
8806                       REG_UFS_MEM_CFG);
8807         hba->mcq_enabled = true;
8808
8809         dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8810                  hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8811                  hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8812                  hba->nutrs);
8813 }
8814
8815 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8816 {
8817         int ret;
8818         struct Scsi_Host *host = hba->host;
8819
8820         hba->ufshcd_state = UFSHCD_STATE_RESET;
8821
8822         ret = ufshcd_link_startup(hba);
8823         if (ret)
8824                 return ret;
8825
8826         if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8827                 return ret;
8828
8829         /* Debug counters initialization */
8830         ufshcd_clear_dbg_ufs_stats(hba);
8831
8832         /* UniPro link is active now */
8833         ufshcd_set_link_active(hba);
8834
8835         /* Reconfigure MCQ upon reset */
8836         if (is_mcq_enabled(hba) && !init_dev_params)
8837                 ufshcd_config_mcq(hba);
8838
8839         /* Verify device initialization by sending NOP OUT UPIU */
8840         ret = ufshcd_verify_dev_init(hba);
8841         if (ret)
8842                 return ret;
8843
8844         /* Initiate UFS device initialization and wait until it completes */
8845         ret = ufshcd_complete_dev_init(hba);
8846         if (ret)
8847                 return ret;
8848
8849         /*
8850          * Initialize the UFS device parameters used by the driver; these
8851          * parameters are associated with UFS descriptors.
8852          */
8853         if (init_dev_params) {
8854                 ret = ufshcd_device_params_init(hba);
8855                 if (ret)
8856                         return ret;
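                /*
                 * First-time initialization: allocate and configure the MCQ
                 * resources if supported (falling back to SDB mode on failure)
                 * and register the SCSI host.
                 */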
8857                 if (is_mcq_supported(hba) && !hba->scsi_host_added) {
8858                         ret = ufshcd_alloc_mcq(hba);
8859                         if (!ret) {
8860                                 ufshcd_config_mcq(hba);
8861                         } else {
8862                                 /* Continue with SDB mode */
8863                                 use_mcq_mode = false;
8864                                 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
8865                                          ret);
8866                         }
8867                         ret = scsi_add_host(host, hba->dev);
8868                         if (ret) {
8869                                 dev_err(hba->dev, "scsi_add_host failed\n");
8870                                 return ret;
8871                         }
8872                         hba->scsi_host_added = true;
8873                 } else if (is_mcq_supported(hba)) {
8874                         /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
8875                         ufshcd_config_mcq(hba);
8876                 }
8877         }
8878
8879         ufshcd_tune_unipro_params(hba);
8880
8881         /* UFS device is also active now */
8882         ufshcd_set_ufs_dev_active(hba);
8883         ufshcd_force_reset_auto_bkops(hba);
8884
8885         ufshcd_set_timestamp_attr(hba);
8886         schedule_delayed_work(&hba->ufs_rtc_update_work,
8887                               msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8888
8889         /* Gear up to HS gear if supported */
8890         if (hba->max_pwr_info.is_valid) {
8891                 /*
8892                  * Set the right value to bRefClkFreq before attempting to
8893                  * switch to HS gears.
8894                  */
8895                 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8896                         ufshcd_set_dev_ref_clk(hba);
8897                 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8898                 if (ret) {
8899                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8900                                         __func__, ret);
8901                         return ret;
8902                 }
8903         }
8904
8905         return 0;
8906 }
8907
8908 /**
8909  * ufshcd_probe_hba - probe hba to detect device and initialize it
8910  * @hba: per-adapter instance
8911  * @init_dev_params: whether or not to call ufshcd_device_params_init().
8912  *
8913  * Execute link-startup and verify device initialization
8914  *
8915  * Return: 0 upon success; < 0 upon failure.
8916  */
8917 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8918 {
8919         ktime_t start = ktime_get();
8920         unsigned long flags;
8921         int ret;
8922
8923         ret = ufshcd_device_init(hba, init_dev_params);
8924         if (ret)
8925                 goto out;
8926
8927         if (!hba->pm_op_in_progress &&
8928             (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8929                 /* Reset the device and controller before doing reinit */
8930                 ufshcd_device_reset(hba);
8931                 ufshcd_hba_stop(hba);
8932                 ufshcd_vops_reinit_notify(hba);
8933                 ret = ufshcd_hba_enable(hba);
8934                 if (ret) {
8935                         dev_err(hba->dev, "Host controller enable failed\n");
8936                         ufshcd_print_evt_hist(hba);
8937                         ufshcd_print_host_state(hba);
8938                         goto out;
8939                 }
8940
8941                 /* Reinit the device */
8942                 ret = ufshcd_device_init(hba, init_dev_params);
8943                 if (ret)
8944                         goto out;
8945         }
8946
8947         ufshcd_print_pwr_info(hba);
8948
8949         /*
8950          * bActiveICCLevel is volatile for the UFS device (as per the latest v2.1
8951          * spec) as well as for removable UFS cards, hence always set the parameter.
8952          * Note: the error handler may issue a device reset, which also resets
8953          * bActiveICCLevel, so it is always safe to set this here.
8954          */
8955         ufshcd_set_active_icc_lvl(hba);
8956
8957         /* Enable UFS Write Booster if supported */
8958         ufshcd_configure_wb(hba);
8959
8960         if (hba->ee_usr_mask)
8961                 ufshcd_write_ee_control(hba);
8962         ufshcd_configure_auto_hibern8(hba);
8963
8964 out:
8965         spin_lock_irqsave(hba->host->host_lock, flags);
8966         if (ret)
8967                 hba->ufshcd_state = UFSHCD_STATE_ERROR;
8968         else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
8969                 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
8970         spin_unlock_irqrestore(hba->host->host_lock, flags);
8971
8972         trace_ufshcd_init(dev_name(hba->dev), ret,
8973                 ktime_to_us(ktime_sub(ktime_get(), start)),
8974                 hba->curr_dev_pwr_mode, hba->uic_link_state);
8975         return ret;
8976 }
8977
8978 /**
8979  * ufshcd_async_scan - asynchronous execution for probing hba
8980  * @data: data pointer to pass to this function
8981  * @cookie: cookie data
8982  */
8983 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8984 {
8985         struct ufs_hba *hba = (struct ufs_hba *)data;
8986         int ret;
8987
8988         down(&hba->host_sem);
8989         /* Initialize hba, detect and initialize UFS device */
8990         ret = ufshcd_probe_hba(hba, true);
8991         up(&hba->host_sem);
8992         if (ret)
8993                 goto out;
8994
8995         /* Probe and add UFS logical units  */
8996         ret = ufshcd_add_lus(hba);
8997
8998 out:
8999         pm_runtime_put_sync(hba->dev);
9000
9001         if (ret)
9002                 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
9003 }
9004
9005 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
9006 {
9007         struct ufs_hba *hba = shost_priv(scmd->device->host);
9008
9009         if (!hba->system_suspending) {
9010                 /* Activate the error handler in the SCSI core. */
9011                 return SCSI_EH_NOT_HANDLED;
9012         }
9013
9014         /*
9015          * If we get here we know that no TMFs are outstanding and also that
9016          * the only pending command is a START STOP UNIT command. Handle the
9017          * timeout of that command directly to prevent a deadlock between
9018          * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
9019          */
9020         ufshcd_link_recovery(hba);
9021         dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
9022                  __func__, hba->outstanding_tasks);
9023
9024         return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
9025 }
9026
9027 static const struct attribute_group *ufshcd_driver_groups[] = {
9028         &ufs_sysfs_unit_descriptor_group,
9029         &ufs_sysfs_lun_attributes_group,
9030         NULL,
9031 };
9032
9033 static struct ufs_hba_variant_params ufs_hba_vps = {
9034         .hba_enable_delay_us            = 1000,
9035         .wb_flush_threshold             = UFS_WB_BUF_REMAIN_PERCENT(40),
9036         .devfreq_profile.polling_ms     = 100,
9037         .devfreq_profile.target         = ufshcd_devfreq_target,
9038         .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
9039         .ondemand_data.upthreshold      = 70,
9040         .ondemand_data.downdifferential = 5,
9041 };
9042
9043 static const struct scsi_host_template ufshcd_driver_template = {
9044         .module                 = THIS_MODULE,
9045         .name                   = UFSHCD,
9046         .proc_name              = UFSHCD,
9047         .map_queues             = ufshcd_map_queues,
9048         .queuecommand           = ufshcd_queuecommand,
9049         .mq_poll                = ufshcd_poll,
9050         .slave_alloc            = ufshcd_slave_alloc,
9051         .slave_configure        = ufshcd_slave_configure,
9052         .slave_destroy          = ufshcd_slave_destroy,
9053         .change_queue_depth     = ufshcd_change_queue_depth,
9054         .eh_abort_handler       = ufshcd_abort,
9055         .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
9056         .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
9057         .eh_timed_out           = ufshcd_eh_timed_out,
9058         .this_id                = -1,
9059         .sg_tablesize           = SG_ALL,
9060         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
9061         .can_queue              = UFSHCD_CAN_QUEUE,
9062         .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
9063         .max_sectors            = SZ_1M / SECTOR_SIZE,
9064         .max_host_blocked       = 1,
9065         .track_queue_depth      = 1,
9066         .skip_settle_delay      = 1,
9067         .sdev_groups            = ufshcd_driver_groups,
9068         .rpm_autosuspend_delay  = RPM_AUTOSUSPEND_DELAY_MS,
9069 };
9070
9071 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
9072                                    int ua)
9073 {
9074         int ret;
9075
9076         if (!vreg)
9077                 return 0;
9078
9079         /*
9080          * The "set_load" operation is only required for regulators that have
9081          * a current limit configured. Otherwise a max_uA of zero may cause
9082          * unexpected behavior when the regulator is enabled or put into high
9083          * power mode.
9084          */
9085         if (!vreg->max_uA)
9086                 return 0;
9087
9088         ret = regulator_set_load(vreg->reg, ua);
9089         if (ret < 0) {
9090                 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
9091                                 __func__, vreg->name, ua, ret);
9092         }
9093
9094         return ret;
9095 }
9096
9097 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
9098                                          struct ufs_vreg *vreg)
9099 {
9100         return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
9101 }
9102
9103 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
9104                                          struct ufs_vreg *vreg)
9105 {
9106         if (!vreg)
9107                 return 0;
9108
9109         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
9110 }
9111
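/*
 * Request max_uA as the regulator load when switching the rail on and a zero
 * load when switching it off; regulators without voltage control are left
 * untouched.
 */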
9112 static int ufshcd_config_vreg(struct device *dev,
9113                 struct ufs_vreg *vreg, bool on)
9114 {
9115         if (regulator_count_voltages(vreg->reg) <= 0)
9116                 return 0;
9117
9118         return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
9119 }
9120
9121 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
9122 {
9123         int ret = 0;
9124
9125         if (!vreg || vreg->enabled)
9126                 goto out;
9127
9128         ret = ufshcd_config_vreg(dev, vreg, true);
9129         if (!ret)
9130                 ret = regulator_enable(vreg->reg);
9131
9132         if (!ret)
9133                 vreg->enabled = true;
9134         else
9135                 dev_err(dev, "%s: %s enable failed, err=%d\n",
9136                                 __func__, vreg->name, ret);
9137 out:
9138         return ret;
9139 }
9140
9141 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9142 {
9143         int ret = 0;
9144
9145         if (!vreg || !vreg->enabled || vreg->always_on)
9146                 goto out;
9147
9148         ret = regulator_disable(vreg->reg);
9149
9150         if (!ret) {
9151                 /* ignore errors on applying disable config */
9152                 ufshcd_config_vreg(dev, vreg, false);
9153                 vreg->enabled = false;
9154         } else {
9155                 dev_err(dev, "%s: %s disable failed, err=%d\n",
9156                                 __func__, vreg->name, ret);
9157         }
9158 out:
9159         return ret;
9160 }
9161
9162 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9163 {
9164         int ret = 0;
9165         struct device *dev = hba->dev;
9166         struct ufs_vreg_info *info = &hba->vreg_info;
9167
9168         ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9169         if (ret)
9170                 goto out;
9171
9172         ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9173         if (ret)
9174                 goto out;
9175
9176         ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9177
9178 out:
9179         if (ret) {
9180                 ufshcd_toggle_vreg(dev, info->vccq2, false);
9181                 ufshcd_toggle_vreg(dev, info->vccq, false);
9182                 ufshcd_toggle_vreg(dev, info->vcc, false);
9183         }
9184         return ret;
9185 }
9186
9187 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9188 {
9189         struct ufs_vreg_info *info = &hba->vreg_info;
9190
9191         return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9192 }
9193
9194 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9195 {
9196         int ret = 0;
9197
9198         if (!vreg)
9199                 goto out;
9200
9201         vreg->reg = devm_regulator_get(dev, vreg->name);
9202         if (IS_ERR(vreg->reg)) {
9203                 ret = PTR_ERR(vreg->reg);
9204                 dev_err(dev, "%s: %s get failed, err=%d\n",
9205                                 __func__, vreg->name, ret);
9206         }
9207 out:
9208         return ret;
9209 }
9210 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9211
9212 static int ufshcd_init_vreg(struct ufs_hba *hba)
9213 {
9214         int ret = 0;
9215         struct device *dev = hba->dev;
9216         struct ufs_vreg_info *info = &hba->vreg_info;
9217
9218         ret = ufshcd_get_vreg(dev, info->vcc);
9219         if (ret)
9220                 goto out;
9221
9222         ret = ufshcd_get_vreg(dev, info->vccq);
9223         if (!ret)
9224                 ret = ufshcd_get_vreg(dev, info->vccq2);
9225 out:
9226         return ret;
9227 }
9228
9229 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9230 {
9231         struct ufs_vreg_info *info = &hba->vreg_info;
9232
9233         return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9234 }
9235
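/*
 * Enable or disable every clock on hba->clk_list_head, skipping clocks that
 * must stay on while the link is active, with the vendor setup_clocks hook
 * invoked before and after the change. On failure, any clocks left enabled
 * are disabled again.
 */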
9236 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9237 {
9238         int ret = 0;
9239         struct ufs_clk_info *clki;
9240         struct list_head *head = &hba->clk_list_head;
9241         unsigned long flags;
9242         ktime_t start = ktime_get();
9243         bool clk_state_changed = false;
9244
9245         if (list_empty(head))
9246                 goto out;
9247
9248         ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9249         if (ret)
9250                 return ret;
9251
9252         list_for_each_entry(clki, head, list) {
9253                 if (!IS_ERR_OR_NULL(clki->clk)) {
9254                         /*
9255                          * Don't disable clocks which are needed
9256                          * to keep the link active.
9257                          */
9258                         if (ufshcd_is_link_active(hba) &&
9259                             clki->keep_link_active)
9260                                 continue;
9261
9262                         clk_state_changed = on ^ clki->enabled;
9263                         if (on && !clki->enabled) {
9264                                 ret = clk_prepare_enable(clki->clk);
9265                                 if (ret) {
9266                                         dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9267                                                 __func__, clki->name, ret);
9268                                         goto out;
9269                                 }
9270                         } else if (!on && clki->enabled) {
9271                                 clk_disable_unprepare(clki->clk);
9272                         }
9273                         clki->enabled = on;
9274                         dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9275                                         clki->name, on ? "en" : "dis");
9276                 }
9277         }
9278
9279         ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9280         if (ret)
9281                 return ret;
9282
9283 out:
9284         if (ret) {
9285                 list_for_each_entry(clki, head, list) {
9286                         if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9287                                 clk_disable_unprepare(clki->clk);
9288                 }
9289         } else if (!ret && on) {
9290                 spin_lock_irqsave(hba->host->host_lock, flags);
9291                 hba->clk_gating.state = CLKS_ON;
9292                 trace_ufshcd_clk_gating(dev_name(hba->dev),
9293                                         hba->clk_gating.state);
9294                 spin_unlock_irqrestore(hba->host->host_lock, flags);
9295         }
9296
9297         if (clk_state_changed)
9298                 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
9299                         (on ? "on" : "off"),
9300                         ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9301         return ret;
9302 }
9303
9304 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9305 {
9306         u32 freq;
9307         int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9308
9309         if (ret) {
9310                 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9311                 return REF_CLK_FREQ_INVAL;
9312         }
9313
9314         return ufs_get_bref_clk_from_hz(freq);
9315 }
9316
9317 static int ufshcd_init_clocks(struct ufs_hba *hba)
9318 {
9319         int ret = 0;
9320         struct ufs_clk_info *clki;
9321         struct device *dev = hba->dev;
9322         struct list_head *head = &hba->clk_list_head;
9323
9324         if (list_empty(head))
9325                 goto out;
9326
9327         list_for_each_entry(clki, head, list) {
9328                 if (!clki->name)
9329                         continue;
9330
9331                 clki->clk = devm_clk_get(dev, clki->name);
9332                 if (IS_ERR(clki->clk)) {
9333                         ret = PTR_ERR(clki->clk);
9334                         dev_err(dev, "%s: %s clk get failed, %d\n",
9335                                         __func__, clki->name, ret);
9336                         goto out;
9337                 }
9338
9339                 /*
9340                  * Parse device ref clk freq as per device tree "ref_clk".
9341                  * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9342                  * in ufshcd_alloc_host().
9343                  */
9344                 if (!strcmp(clki->name, "ref_clk"))
9345                         ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9346
9347                 if (clki->max_freq) {
9348                         ret = clk_set_rate(clki->clk, clki->max_freq);
9349                         if (ret) {
9350                                 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9351                                         __func__, clki->name,
9352                                         clki->max_freq, ret);
9353                                 goto out;
9354                         }
9355                         clki->curr_freq = clki->max_freq;
9356                 }
9357                 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9358                                 clki->name, clk_get_rate(clki->clk));
9359         }
9360
9361         /* Set Max. frequency for all clocks */
9362         if (hba->use_pm_opp) {
9363                 ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
9364                 if (ret) {
9365                         dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
9366                                 ret);
9367                         goto out;
9368                 }
9369         }
9370
9371 out:
9372         return ret;
9373 }
9374
9375 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9376 {
9377         int err = 0;
9378
9379         if (!hba->vops)
9380                 goto out;
9381
9382         err = ufshcd_vops_init(hba);
9383         if (err)
9384                 dev_err_probe(hba->dev, err,
9385                               "%s: variant %s init failed with err %d\n",
9386                               __func__, ufshcd_get_var_name(hba), err);
9387 out:
9388         return err;
9389 }
9390
9391 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9392 {
9393         if (!hba->vops)
9394                 return;
9395
9396         ufshcd_vops_exit(hba);
9397 }
9398
9399 static int ufshcd_hba_init(struct ufs_hba *hba)
9400 {
9401         int err;
9402
9403         /*
9404          * Handle host controller power separately from the UFS device power
9405          * rails, as this makes it easier to control UFS host controller power
9406          * collapse, which is different from UFS device power collapse.
9407          * Also, enable the host controller power before going ahead with the
9408          * rest of the initialization here.
9409          */
9410         err = ufshcd_init_hba_vreg(hba);
9411         if (err)
9412                 goto out;
9413
9414         err = ufshcd_setup_hba_vreg(hba, true);
9415         if (err)
9416                 goto out;
9417
9418         err = ufshcd_init_clocks(hba);
9419         if (err)
9420                 goto out_disable_hba_vreg;
9421
9422         if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9423                 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9424
9425         err = ufshcd_setup_clocks(hba, true);
9426         if (err)
9427                 goto out_disable_hba_vreg;
9428
9429         err = ufshcd_init_vreg(hba);
9430         if (err)
9431                 goto out_disable_clks;
9432
9433         err = ufshcd_setup_vreg(hba, true);
9434         if (err)
9435                 goto out_disable_clks;
9436
9437         err = ufshcd_variant_hba_init(hba);
9438         if (err)
9439                 goto out_disable_vreg;
9440
9441         ufs_debugfs_hba_init(hba);
9442         ufs_fault_inject_hba_init(hba);
9443
9444         hba->is_powered = true;
9445         goto out;
9446
9447 out_disable_vreg:
9448         ufshcd_setup_vreg(hba, false);
9449 out_disable_clks:
9450         ufshcd_setup_clocks(hba, false);
9451 out_disable_hba_vreg:
9452         ufshcd_setup_hba_vreg(hba, false);
9453 out:
9454         return err;
9455 }
9456
9457 static void ufshcd_hba_exit(struct ufs_hba *hba)
9458 {
9459         if (hba->is_powered) {
9460                 ufshcd_exit_clk_scaling(hba);
9461                 ufshcd_exit_clk_gating(hba);
9462                 if (hba->eh_wq)
9463                         destroy_workqueue(hba->eh_wq);
9464                 ufs_debugfs_hba_exit(hba);
9465                 ufshcd_variant_hba_exit(hba);
9466                 ufshcd_setup_vreg(hba, false);
9467                 ufshcd_setup_clocks(hba, false);
9468                 ufshcd_setup_hba_vreg(hba, false);
9469                 hba->is_powered = false;
9470                 ufs_put_device_desc(hba);
9471         }
9472 }
9473
9474 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9475                                      enum ufs_dev_pwr_mode pwr_mode,
9476                                      struct scsi_sense_hdr *sshdr)
9477 {
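        /* START STOP UNIT with the requested power condition in byte 4, bits 7:4 */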
9478         const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9479         const struct scsi_exec_args args = {
9480                 .sshdr = sshdr,
9481                 .req_flags = BLK_MQ_REQ_PM,
9482                 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9483         };
9484
9485         return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9486                         /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9487                         &args);
9488 }
9489
9490 /**
9491  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9492  *                           power mode
9493  * @hba: per adapter instance
9494  * @pwr_mode: device power mode to set
9495  *
9496  * Return: 0 if requested power mode is set successfully;
9497  *         < 0 if failed to set the requested power mode.
9498  */
9499 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9500                                      enum ufs_dev_pwr_mode pwr_mode)
9501 {
9502         struct scsi_sense_hdr sshdr;
9503         struct scsi_device *sdp;
9504         unsigned long flags;
9505         int ret, retries;
9506
9507         spin_lock_irqsave(hba->host->host_lock, flags);
9508         sdp = hba->ufs_device_wlun;
9509         if (sdp && scsi_device_online(sdp))
9510                 ret = scsi_device_get(sdp);
9511         else
9512                 ret = -ENODEV;
9513         spin_unlock_irqrestore(hba->host->host_lock, flags);
9514
9515         if (ret)
9516                 return ret;
9517
9518         /*
9519          * If scsi commands fail, the scsi mid-layer schedules scsi error-
9520          * handling, which would wait for host to be resumed. Since we know
9521          * we are functional while we are here, skip host resume in error
9522          * handling context.
9523          */
9524         hba->host->eh_noresume = 1;
9525
9526         /*
9527          * This function is generally called from the power management
9528          * callbacks, hence set the RQF_PM flag so that it doesn't resume
9529          * already suspended children.
9530          */
9531         for (retries = 3; retries > 0; --retries) {
9532                 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9533                 /*
9534                  * scsi_execute_cmd() only returns a negative value if the request
9535                  * queue is dying.
9536                  */
9537                 if (ret <= 0)
9538                         break;
9539         }
9540         if (ret) {
9541                 sdev_printk(KERN_WARNING, sdp,
9542                             "START_STOP failed for power mode: %d, result %x\n",
9543                             pwr_mode, ret);
9544                 if (ret > 0) {
9545                         if (scsi_sense_valid(&sshdr))
9546                                 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9547                         ret = -EIO;
9548                 }
9549         } else {
9550                 hba->curr_dev_pwr_mode = pwr_mode;
9551         }
9552
9553         scsi_device_put(sdp);
9554         hba->host->eh_noresume = 0;
9555         return ret;
9556 }
9557
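/*
 * Move the UniPro link into the requested state: enter Hibern8 for
 * UIC_LINK_HIBERN8_STATE, or enter Hibern8 and then stop the host controller
 * for UIC_LINK_OFF_STATE (skipped when auto-bkops requires the device, and
 * hence the link, to stay powered).
 */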
9558 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9559                                         enum uic_link_state req_link_state,
9560                                         bool check_for_bkops)
9561 {
9562         int ret = 0;
9563
9564         if (req_link_state == hba->uic_link_state)
9565                 return 0;
9566
9567         if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9568                 ret = ufshcd_uic_hibern8_enter(hba);
9569                 if (!ret) {
9570                         ufshcd_set_link_hibern8(hba);
9571                 } else {
9572                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9573                                         __func__, ret);
9574                         goto out;
9575                 }
9576         }
9577         /*
9578          * If autobkops is enabled, link can't be turned off because
9579          * turning off the link would also turn off the device, except in the
9580          * case of DeepSleep where the device is expected to remain powered.
9581          */
9582         else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9583                  (!check_for_bkops || !hba->auto_bkops_enabled)) {
9584                 /*
9585                  * Let's make sure the link is in low power mode; we currently do
9586                  * this by putting the link in Hibern8. Another way to put the
9587                  * link in low power mode is to send a DME end point reset to the
9588                  * device and then a DME reset command to the local UniPro, but
9589                  * putting the link in Hibern8 is much faster.
9590                  *
9591                  * Note also that putting the link in Hibern8 is a requirement
9592                  * for entering DeepSleep.
9593                  */
9594                 ret = ufshcd_uic_hibern8_enter(hba);
9595                 if (ret) {
9596                         dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9597                                         __func__, ret);
9598                         goto out;
9599                 }
9600                 /*
9601                  * Change controller state to "reset state" which
9602                  * should also put the link in off/reset state
9603                  */
9604                 ufshcd_hba_stop(hba);
9605                 /*
9606                  * TODO: Check if we need any delay to make sure that
9607                  * controller is reset
9608                  */
9609                 ufshcd_set_link_off(hba);
9610         }
9611
9612 out:
9613         return ret;
9614 }
9615
9616 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9617 {
9618         bool vcc_off = false;
9619
9620         /*
9621          * It seems some UFS devices may keep drawing more than sleep current
9622          * (at least for 500us) from the UFS rails (especially the VCCQ rail).
9623          * To avoid this situation, add a 2ms delay before putting these UFS
9624          * rails in LPM mode.
9625          */
9626         if (!ufshcd_is_link_active(hba) &&
9627             hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9628                 usleep_range(2000, 2100);
9629
9630         /*
9631          * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
9632          * some power.
9633          *
9634          * If the UFS device and link are in the OFF state, all power supplies
9635          * (VCC, VCCQ, VCCQ2) can be turned off if power on write protect is not
9636          * required. If UFS link is inactive (Hibern8 or OFF state) and device
9637          * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9638          *
9639          * Ignore the error returned by ufshcd_toggle_vreg() as the device is
9640          * anyway in a low power state, which saves some power.
9641          *
9642          * If Write Booster is enabled and the device needs to flush the WB
9643          * buffer OR if bkops status is urgent for WB, keep Vcc on.
9644          */
9645         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9646             !hba->dev_info.is_lu_power_on_wp) {
9647                 ufshcd_setup_vreg(hba, false);
9648                 vcc_off = true;
9649         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9650                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9651                 vcc_off = true;
9652                 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9653                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9654                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9655                 }
9656         }
9657
9658         /*
9659          * Some UFS devices require a delay after the VCC power rail is turned off.
9660          */
9661         if (vcc_off && hba->vreg_info.vcc &&
9662                 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9663                 usleep_range(5000, 5100);
9664 }
9665
9666 #ifdef CONFIG_PM
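/*
 * Counterpart of ufshcd_vreg_set_lpm(): bring the regulators back to high
 * power mode on resume, either by re-enabling all rails or by restoring VCC
 * and taking VCCQ/VCCQ2 out of LPM, depending on the suspend state.
 */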
9667 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9668 {
9669         int ret = 0;
9670
9671         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9672             !hba->dev_info.is_lu_power_on_wp) {
9673                 ret = ufshcd_setup_vreg(hba, true);
9674         } else if (!ufshcd_is_ufs_dev_active(hba)) {
9675                 if (!ufshcd_is_link_active(hba)) {
9676                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9677                         if (ret)
9678                                 goto vcc_disable;
9679                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9680                         if (ret)
9681                                 goto vccq_lpm;
9682                 }
9683                 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9684         }
9685         goto out;
9686
9687 vccq_lpm:
9688         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9689 vcc_disable:
9690         ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9691 out:
9692         return ret;
9693 }
9694 #endif /* CONFIG_PM */
9695
9696 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9697 {
9698         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9699                 ufshcd_setup_hba_vreg(hba, false);
9700 }
9701
9702 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9703 {
9704         if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9705                 ufshcd_setup_hba_vreg(hba, true);
9706 }
9707
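/*
 * Suspend path for the UFS device well-known LUN: derive the target device
 * power mode and link state from the PM level, handle bkops and WB flush
 * requirements, send START STOP UNIT to change the device power mode,
 * transition the link and invoke the vendor suspend hooks. On failure the
 * device and link are restored to the active state.
 */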
9708 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9709 {
9710         int ret = 0;
9711         bool check_for_bkops;
9712         enum ufs_pm_level pm_lvl;
9713         enum ufs_dev_pwr_mode req_dev_pwr_mode;
9714         enum uic_link_state req_link_state;
9715
9716         hba->pm_op_in_progress = true;
9717         if (pm_op != UFS_SHUTDOWN_PM) {
9718                 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9719                          hba->rpm_lvl : hba->spm_lvl;
9720                 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9721                 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9722         } else {
9723                 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9724                 req_link_state = UIC_LINK_OFF_STATE;
9725         }
9726
9727         /*
9728          * If we can't transition into any of the low power modes
9729          * just gate the clocks.
9730          */
9731         ufshcd_hold(hba);
9732         hba->clk_gating.is_suspended = true;
9733
9734         if (ufshcd_is_clkscaling_supported(hba))
9735                 ufshcd_clk_scaling_suspend(hba, true);
9736
9737         if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9738                         req_link_state == UIC_LINK_ACTIVE_STATE) {
9739                 goto vops_suspend;
9740         }
9741
9742         if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9743             (req_link_state == hba->uic_link_state))
9744                 goto enable_scaling;
9745
9746         /* UFS device & link must be active before we enter this function */
9747         if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9748                 ret = -EINVAL;
9749                 goto enable_scaling;
9750         }
9751
9752         if (pm_op == UFS_RUNTIME_PM) {
9753                 if (ufshcd_can_autobkops_during_suspend(hba)) {
9754                         /*
9755                          * The device is idle with no requests in the queue;
9756                          * allow background operations if the bkops status shows
9757                          * that performance might be impacted.
9758                          */
9759                         ret = ufshcd_urgent_bkops(hba);
9760                         if (ret) {
9761                                 /*
9762                                  * If we return an error in the suspend flow, IO
9763                                  * will hang. Trigger the error handler and abort
9764                                  * suspend for error recovery.
9765                                  */
9766                                 ufshcd_force_error_recovery(hba);
9767                                 ret = -EBUSY;
9768                                 goto enable_scaling;
9769                         }
9770                 } else {
9771                         /* make sure that auto bkops is disabled */
9772                         ufshcd_disable_auto_bkops(hba);
9773                 }
9774                 /*
9775                  * If device needs to do BKOP or WB buffer flush during
9776                  * Hibern8, keep the device power mode as "active power mode"
9777                  * and keep the VCC supply on.
9778                  */
9779                 hba->dev_info.b_rpm_dev_flush_capable =
9780                         hba->auto_bkops_enabled ||
9781                         (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9782                         ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9783                         ufshcd_is_auto_hibern8_enabled(hba))) &&
9784                         ufshcd_wb_need_flush(hba));
9785         }
9786
9787         flush_work(&hba->eeh_work);
9788
9789         ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9790         if (ret)
9791                 goto enable_scaling;
9792
9793         if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9794                 if (pm_op != UFS_RUNTIME_PM)
9795                         /* ensure that bkops is disabled */
9796                         ufshcd_disable_auto_bkops(hba);
9797
9798                 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9799                         ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9800                         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9801                                 /*
9802                                  * If we return an error in the suspend flow, IO
9803                                  * will hang. Trigger the error handler and abort
9804                                  * suspend for error recovery.
9805                                  */
9806                                 ufshcd_force_error_recovery(hba);
9807                                 ret = -EBUSY;
9808                         }
9809                         if (ret)
9810                                 goto enable_scaling;
9811                 }
9812         }
9813
9814         /*
9815          * In the case of DeepSleep, the device is expected to remain powered
9816          * with the link off, so do not check for bkops.
9817          */
9818         check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9819         ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9820         if (ret && pm_op != UFS_SHUTDOWN_PM) {
9821                 /*
9822                  * If we return an error in the suspend flow, IO will hang.
9823                  * Trigger the error handler and abort suspend for error
9824                  * recovery.
9825                  */
9826                 ufshcd_force_error_recovery(hba);
9827                 ret = -EBUSY;
9828         }
9829         if (ret)
9830                 goto set_dev_active;
9831
9832 vops_suspend:
9833         /*
9834          * Call vendor specific suspend callback. As these callbacks may access
9835          * vendor specific host controller register space, call them while the
9836          * host clocks are still on.
9837          */
9838         ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9839         if (ret)
9840                 goto set_link_active;
9841
9842         cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
9843         goto out;
9844
9845 set_link_active:
9846         /*
9847          * Device hardware reset is required to exit DeepSleep. Also, for
9848          * DeepSleep, the link is off so host reset and restore will be done
9849          * further below.
9850          */
9851         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9852                 ufshcd_device_reset(hba);
9853                 WARN_ON(!ufshcd_is_link_off(hba));
9854         }
9855         if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9856                 ufshcd_set_link_active(hba);
9857         else if (ufshcd_is_link_off(hba))
9858                 ufshcd_host_reset_and_restore(hba);
9859 set_dev_active:
9860         /* Can also get here needing to exit DeepSleep */
9861         if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9862                 ufshcd_device_reset(hba);
9863                 ufshcd_host_reset_and_restore(hba);
9864         }
9865         if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9866                 ufshcd_disable_auto_bkops(hba);
9867 enable_scaling:
9868         if (ufshcd_is_clkscaling_supported(hba))
9869                 ufshcd_clk_scaling_suspend(hba, false);
9870
9871         hba->dev_info.b_rpm_dev_flush_capable = false;
9872 out:
9873         if (hba->dev_info.b_rpm_dev_flush_capable) {
9874                 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9875                         msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9876         }
9877
9878         if (ret) {
9879                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9880                 hba->clk_gating.is_suspended = false;
9881                 ufshcd_release(hba);
9882         }
9883         hba->pm_op_in_progress = false;
9884         return ret;
9885 }
9886
9887 #ifdef CONFIG_PM
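/*
 * Resume path for the UFS device well-known LUN: invoke the vendor resume
 * hook, bring the link back to active (exiting Hibern8 or performing a full
 * reset and restore if the link was off), return the device to the active
 * power mode and re-enable bkops, exception event control and auto-hibern8.
 */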
9888 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9889 {
9890         int ret;
9891         enum uic_link_state old_link_state = hba->uic_link_state;
9892
9893         hba->pm_op_in_progress = true;
9894
9895         /*
9896          * Call vendor specific resume callback. As these callbacks may access
9897          * vendor specific host controller register space call them when the
9898          * host clocks are ON.
9899          */
9900         ret = ufshcd_vops_resume(hba, pm_op);
9901         if (ret)
9902                 goto out;
9903
9904         /* For DeepSleep, the only supported option is to have the link off */
9905         WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9906
9907         if (ufshcd_is_link_hibern8(hba)) {
9908                 ret = ufshcd_uic_hibern8_exit(hba);
9909                 if (!ret) {
9910                         ufshcd_set_link_active(hba);
9911                 } else {
9912                         dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9913                                         __func__, ret);
9914                         goto vendor_suspend;
9915                 }
9916         } else if (ufshcd_is_link_off(hba)) {
9917                 /*
9918                  * A full initialization of the host and the device is
9919                  * required since the link was put to off during suspend.
9920                  * Note, in the case of DeepSleep, the device will exit
9921                  * DeepSleep due to device reset.
9922                  */
9923                 ret = ufshcd_reset_and_restore(hba);
9924                 /*
9925                  * ufshcd_reset_and_restore() should have already
9926                  * set the link state as active
9927                  */
9928                 if (ret || !ufshcd_is_link_active(hba))
9929                         goto vendor_suspend;
9930         }
9931
9932         if (!ufshcd_is_ufs_dev_active(hba)) {
9933                 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9934                 if (ret)
9935                         goto set_old_link_state;
9936                 ufshcd_set_timestamp_attr(hba);
9937                 schedule_delayed_work(&hba->ufs_rtc_update_work,
9938                                       msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
9939         }
9940
9941         if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9942                 ufshcd_enable_auto_bkops(hba);
9943         else
9944                 /*
9945                  * If BKOPS is urgently needed at this moment, keep
9946                  * auto-bkops enabled; otherwise disable it.
9947                  */
9948                 ufshcd_urgent_bkops(hba);
9949
9950         if (hba->ee_usr_mask)
9951                 ufshcd_write_ee_control(hba);
9952
9953         if (ufshcd_is_clkscaling_supported(hba))
9954                 ufshcd_clk_scaling_suspend(hba, false);
9955
9956         if (hba->dev_info.b_rpm_dev_flush_capable) {
9957                 hba->dev_info.b_rpm_dev_flush_capable = false;
9958                 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9959         }
9960
9961         ufshcd_configure_auto_hibern8(hba);
9962
9963         goto out;
9964
9965 set_old_link_state:
9966         ufshcd_link_state_transition(hba, old_link_state, 0);
9967 vendor_suspend:
9968         ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9969         ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9970 out:
9971         if (ret)
9972                 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9973         hba->clk_gating.is_suspended = false;
9974         ufshcd_release(hba);
9975         hba->pm_op_in_progress = false;
9976         return ret;
9977 }
9978
9979 static int ufshcd_wl_runtime_suspend(struct device *dev)
9980 {
9981         struct scsi_device *sdev = to_scsi_device(dev);
9982         struct ufs_hba *hba;
9983         int ret;
9984         ktime_t start = ktime_get();
9985
9986         hba = shost_priv(sdev->host);
9987
9988         ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9989         if (ret)
9990                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9991
9992         trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
9993                 ktime_to_us(ktime_sub(ktime_get(), start)),
9994                 hba->curr_dev_pwr_mode, hba->uic_link_state);
9995
9996         return ret;
9997 }
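/*
 * Illustrative sketch, not part of this driver: the PM wrappers above all
 * follow the same tracing pattern - sample a ktime_t on entry and report the
 * elapsed time in microseconds on exit. A minimal, stand-alone version of
 * that pattern could look like the function below; the function name and the
 * pr_info() output are hypothetical, while ktime_get(), ktime_sub() and
 * ktime_to_us() are the real helpers from <linux/ktime.h>.
 */
static int __maybe_unused example_timed_pm_op(struct device *dev)
{
        ktime_t start = ktime_get();
        int ret = 0;

        /* ... the actual suspend or resume work would go here ... */

        pr_info("%s: ret=%d, took %lld us\n", dev_name(dev), ret,
                ktime_to_us(ktime_sub(ktime_get(), start)));
        return ret;
}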
9998
9999 static int ufshcd_wl_runtime_resume(struct device *dev)
10000 {
10001         struct scsi_device *sdev = to_scsi_device(dev);
10002         struct ufs_hba *hba;
10003         int ret = 0;
10004         ktime_t start = ktime_get();
10005
10006         hba = shost_priv(sdev->host);
10007
10008         ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
10009         if (ret)
10010                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10011
10012         trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
10013                 ktime_to_us(ktime_sub(ktime_get(), start)),
10014                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10015
10016         return ret;
10017 }
10018 #endif
10019
10020 #ifdef CONFIG_PM_SLEEP
10021 static int ufshcd_wl_suspend(struct device *dev)
10022 {
10023         struct scsi_device *sdev = to_scsi_device(dev);
10024         struct ufs_hba *hba;
10025         int ret = 0;
10026         ktime_t start = ktime_get();
10027
10028         hba = shost_priv(sdev->host);
10029         down(&hba->host_sem);
10030         hba->system_suspending = true;
10031
10032         if (pm_runtime_suspended(dev))
10033                 goto out;
10034
10035         ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
10036         if (ret) {
10037                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__,  ret);
10038                 up(&hba->host_sem);
10039         }
10040
10041 out:
10042         if (!ret)
10043                 hba->is_sys_suspended = true;
10044         trace_ufshcd_wl_suspend(dev_name(dev), ret,
10045                 ktime_to_us(ktime_sub(ktime_get(), start)),
10046                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10047
10048         return ret;
10049 }
10050
10051 static int ufshcd_wl_resume(struct device *dev)
10052 {
10053         struct scsi_device *sdev = to_scsi_device(dev);
10054         struct ufs_hba *hba;
10055         int ret = 0;
10056         ktime_t start = ktime_get();
10057
10058         hba = shost_priv(sdev->host);
10059
10060         if (pm_runtime_suspended(dev))
10061                 goto out;
10062
10063         ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
10064         if (ret)
10065                 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10066 out:
10067         trace_ufshcd_wl_resume(dev_name(dev), ret,
10068                 ktime_to_us(ktime_sub(ktime_get(), start)),
10069                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10070         if (!ret)
10071                 hba->is_sys_suspended = false;
10072         hba->system_suspending = false;
10073         up(&hba->host_sem);
10074         return ret;
10075 }
10076 #endif
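/*
 * Illustrative sketch, not part of this driver: ufshcd_wl_suspend() and
 * ufshcd_wl_resume() above bracket system sleep with down()/up() on
 * hba->host_sem so that other users of the semaphore (for example the
 * shutdown path) are serialized against system PM. The bare pattern, with a
 * hypothetical context structure and the <linux/semaphore.h> primitives, is
 * roughly:
 */
struct example_pm_ctx {
        struct semaphore host_sem;      /* sema_init(&ctx->host_sem, 1) at probe */
        bool system_suspending;
};

static int __maybe_unused example_pm_suspend(struct example_pm_ctx *ctx)
{
        down(&ctx->host_sem);           /* block concurrent users of the host */
        ctx->system_suspending = true;
        /* ... do the suspend work; call up(&ctx->host_sem) on failure ... */
        return 0;
}

static void __maybe_unused example_pm_resume(struct example_pm_ctx *ctx)
{
        /* ... do the resume work ... */
        ctx->system_suspending = false;
        up(&ctx->host_sem);             /* resume complete, release the gate */
}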
10077
10078 /**
10079  * ufshcd_suspend - helper function for suspend operations
10080  * @hba: per adapter instance
10081  *
10082  * This function disables IRQs, turns off the clocks
10083  * and puts vreg and hba-vreg into LPM mode.
10084  *
10085  * Return: 0 upon success; < 0 upon failure.
10086  */
10087 static int ufshcd_suspend(struct ufs_hba *hba)
10088 {
10089         int ret;
10090
10091         if (!hba->is_powered)
10092                 return 0;
10093         /*
10094          * Disable the host IRQ as no host controller transactions are
10095          * expected until resume.
10096          */
10097         ufshcd_disable_irq(hba);
10098         ret = ufshcd_setup_clocks(hba, false);
10099         if (ret) {
10100                 ufshcd_enable_irq(hba);
10101                 return ret;
10102         }
10103         if (ufshcd_is_clkgating_allowed(hba)) {
10104                 hba->clk_gating.state = CLKS_OFF;
10105                 trace_ufshcd_clk_gating(dev_name(hba->dev),
10106                                         hba->clk_gating.state);
10107         }
10108
10109         ufshcd_vreg_set_lpm(hba);
10110         /* Put the host controller in low power mode if possible */
10111         ufshcd_hba_vreg_set_lpm(hba);
10112         return ret;
10113 }
10114
10115 #ifdef CONFIG_PM
10116 /**
10117  * ufshcd_resume - helper function for resume operations
10118  * @hba: per adapter instance
10119  *
10120  * This function turns on the regulators, clocks and
10121  * IRQs of the HBA.
10122  *
10123  * Return: 0 for success and non-zero for failure.
10124  */
10125 static int ufshcd_resume(struct ufs_hba *hba)
10126 {
10127         int ret;
10128
10129         if (!hba->is_powered)
10130                 return 0;
10131
10132         ufshcd_hba_vreg_set_hpm(hba);
10133         ret = ufshcd_vreg_set_hpm(hba);
10134         if (ret)
10135                 goto out;
10136
10137         /* Make sure clocks are enabled before accessing controller */
10138         ret = ufshcd_setup_clocks(hba, true);
10139         if (ret)
10140                 goto disable_vreg;
10141
10142         /* Enable the host IRQ as the host controller will be active soon */
10143         ufshcd_enable_irq(hba);
10144
10145         goto out;
10146
10147 disable_vreg:
10148         ufshcd_vreg_set_lpm(hba);
10149 out:
10150         if (ret)
10151                 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10152         return ret;
10153 }
10154 #endif /* CONFIG_PM */
10155
10156 #ifdef CONFIG_PM_SLEEP
10157 /**
10158  * ufshcd_system_suspend - system suspend callback
10159  * @dev: Device associated with the UFS controller.
10160  *
10161  * Executed before putting the system into a sleep state in which the contents
10162  * of main memory are preserved.
10163  *
10164  * Return: 0 for success and non-zero for failure.
10165  */
10166 int ufshcd_system_suspend(struct device *dev)
10167 {
10168         struct ufs_hba *hba = dev_get_drvdata(dev);
10169         int ret = 0;
10170         ktime_t start = ktime_get();
10171
10172         if (pm_runtime_suspended(hba->dev))
10173                 goto out;
10174
10175         ret = ufshcd_suspend(hba);
10176 out:
10177         trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
10178                 ktime_to_us(ktime_sub(ktime_get(), start)),
10179                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10180         return ret;
10181 }
10182 EXPORT_SYMBOL(ufshcd_system_suspend);
10183
10184 /**
10185  * ufshcd_system_resume - system resume callback
10186  * @dev: Device associated with the UFS controller.
10187  *
10188  * Executed after waking the system up from a sleep state in which the contents
10189  * of main memory were preserved.
10190  *
10191  * Return: 0 for success and non-zero for failure.
10192  */
10193 int ufshcd_system_resume(struct device *dev)
10194 {
10195         struct ufs_hba *hba = dev_get_drvdata(dev);
10196         ktime_t start = ktime_get();
10197         int ret = 0;
10198
10199         if (pm_runtime_suspended(hba->dev))
10200                 goto out;
10201
10202         ret = ufshcd_resume(hba);
10203
10204 out:
10205         trace_ufshcd_system_resume(dev_name(hba->dev), ret,
10206                 ktime_to_us(ktime_sub(ktime_get(), start)),
10207                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10208
10209         return ret;
10210 }
10211 EXPORT_SYMBOL(ufshcd_system_resume);
10212 #endif /* CONFIG_PM_SLEEP */
10213
10214 #ifdef CONFIG_PM
10215 /**
10216  * ufshcd_runtime_suspend - runtime suspend callback
10217  * @dev: Device associated with the UFS controller.
10218  *
10219  * Check the description of the ufshcd_suspend() function for more details.
10220  *
10221  * Return: 0 for success and non-zero for failure.
10222  */
10223 int ufshcd_runtime_suspend(struct device *dev)
10224 {
10225         struct ufs_hba *hba = dev_get_drvdata(dev);
10226         int ret;
10227         ktime_t start = ktime_get();
10228
10229         ret = ufshcd_suspend(hba);
10230
10231         trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
10232                 ktime_to_us(ktime_sub(ktime_get(), start)),
10233                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10234         return ret;
10235 }
10236 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10237
10238 /**
10239  * ufshcd_runtime_resume - runtime resume routine
10240  * @dev: Device associated with the UFS controller.
10241  *
10242  * This function brings the controller to the active state.
10243  * The following operations are performed in this function:
10244  *
10245  * 1. Turn on all the controller-related clocks
10246  * 2. Turn on the VCC rail
10247  *
10248  * Return: 0 upon success; < 0 upon failure.
10249  */
10250 int ufshcd_runtime_resume(struct device *dev)
10251 {
10252         struct ufs_hba *hba = dev_get_drvdata(dev);
10253         int ret;
10254         ktime_t start = ktime_get();
10255
10256         ret = ufshcd_resume(hba);
10257
10258         trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
10259                 ktime_to_us(ktime_sub(ktime_get(), start)),
10260                 hba->curr_dev_pwr_mode, hba->uic_link_state);
10261         return ret;
10262 }
10263 EXPORT_SYMBOL(ufshcd_runtime_resume);
10264 #endif /* CONFIG_PM */
10265
10266 static void ufshcd_wl_shutdown(struct device *dev)
10267 {
10268         struct scsi_device *sdev = to_scsi_device(dev);
10269         struct ufs_hba *hba = shost_priv(sdev->host);
10270
10271         down(&hba->host_sem);
10272         hba->shutting_down = true;
10273         up(&hba->host_sem);
10274
10275         /* Turn on everything while shutting down */
10276         ufshcd_rpm_get_sync(hba);
10277         scsi_device_quiesce(sdev);
10278         shost_for_each_device(sdev, hba->host) {
10279                 if (sdev == hba->ufs_device_wlun)
10280                         continue;
10281                 scsi_device_quiesce(sdev);
10282         }
10283         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10284
10285         /*
10286          * Next, turn off the UFS controller and the UFS regulators. Disable
10287          * clocks.
10288          */
10289         if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10290                 ufshcd_suspend(hba);
10291
10292         hba->is_powered = false;
10293 }
10294
10295 /**
10296  * ufshcd_remove - de-allocate the SCSI host and the host memory space
10297  *              data structures
10298  * @hba: per adapter instance
10299  */
10300 void ufshcd_remove(struct ufs_hba *hba)
10301 {
10302         if (hba->ufs_device_wlun)
10303                 ufshcd_rpm_get_sync(hba);
10304         ufs_hwmon_remove(hba);
10305         ufs_bsg_remove(hba);
10306         ufs_sysfs_remove_nodes(hba->dev);
10307         blk_mq_destroy_queue(hba->tmf_queue);
10308         blk_put_queue(hba->tmf_queue);
10309         blk_mq_free_tag_set(&hba->tmf_tag_set);
10310         scsi_remove_host(hba->host);
10311         /* disable interrupts */
10312         ufshcd_disable_intr(hba, hba->intr_mask);
10313         ufshcd_hba_stop(hba);
10314         ufshcd_hba_exit(hba);
10315 }
10316 EXPORT_SYMBOL_GPL(ufshcd_remove);
10317
10318 #ifdef CONFIG_PM_SLEEP
10319 int ufshcd_system_freeze(struct device *dev)
10320 {
10322         return ufshcd_system_suspend(dev);
10324 }
10325 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10326
10327 int ufshcd_system_restore(struct device *dev)
10328 {
10330         struct ufs_hba *hba = dev_get_drvdata(dev);
10331         int ret;
10332
10333         ret = ufshcd_system_resume(dev);
10334         if (ret)
10335                 return ret;
10336
10337         /* Configure UTRL and UTMRL base address registers */
10338         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10339                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10340         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10341                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10342         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10343                         REG_UTP_TASK_REQ_LIST_BASE_L);
10344         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10345                         REG_UTP_TASK_REQ_LIST_BASE_H);
10346         /*
10347          * Make sure that the UTRL and UTMRL base address registers
10348          * are updated with the latest queue addresses. Only after
10349          * these addresses are updated can new commands be queued.
10350          */
10351         mb();
10352
10353         /* Resuming from hibernate, assume that link was OFF */
10354         ufshcd_set_link_off(hba);
10355
10356         return 0;
10358 }
10359 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
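/*
 * Illustrative sketch, not part of this driver: ufshcd_system_restore() above
 * re-programs the 64-bit UTRL/UTMRL base addresses as pairs of 32-bit
 * register writes. The general pattern, using the real lower_32_bits()/
 * upper_32_bits() and writel() helpers with hypothetical register offsets,
 * looks like this:
 */
#define EXAMPLE_LIST_BASE_L     0x00    /* hypothetical low-dword offset */
#define EXAMPLE_LIST_BASE_H     0x04    /* hypothetical high-dword offset */

static void __maybe_unused example_program_list_base(void __iomem *mmio,
                                                     dma_addr_t list_dma)
{
        writel(lower_32_bits(list_dma), mmio + EXAMPLE_LIST_BASE_L);
        writel(upper_32_bits(list_dma), mmio + EXAMPLE_LIST_BASE_H);
        /* Order the register writes before any new command is queued. */
        mb();
}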
10360
10361 int ufshcd_system_thaw(struct device *dev)
10362 {
10363         return ufshcd_system_resume(dev);
10364 }
10365 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10366 #endif /* CONFIG_PM_SLEEP  */
10367
10368 /**
10369  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10370  * @hba: pointer to Host Bus Adapter (HBA)
10371  */
10372 void ufshcd_dealloc_host(struct ufs_hba *hba)
10373 {
10374         scsi_host_put(hba->host);
10375 }
10376 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
10377
10378 /**
10379  * ufshcd_set_dma_mask - Set dma mask based on the controller
10380  *                       addressing capability
10381  * @hba: per adapter instance
10382  *
10383  * Return: 0 for success, non-zero for failure.
10384  */
10385 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10386 {
10387         if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10388                 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10389                         return 0;
10390         }
10391         return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10392 }
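/*
 * Illustrative sketch, not part of this driver: ufshcd_set_dma_mask() above
 * is an instance of the common "try 64-bit, fall back to 32-bit" DMA mask
 * idiom. Written against a plain struct device, with everything except
 * dma_set_mask_and_coherent() and DMA_BIT_MASK() being hypothetical:
 */
static int __maybe_unused example_set_dma_mask(struct device *dev,
                                               bool has_64bit_addressing)
{
        /* Prefer 64-bit DMA when the controller advertises support for it. */
        if (has_64bit_addressing &&
            !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;

        /* Otherwise, or if 64-bit setup failed, fall back to 32-bit DMA. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}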
10393
10394 /**
10395  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10396  * @dev: pointer to device handle
10397  * @hba_handle: driver private handle
10398  *
10399  * Return: 0 on success, non-zero value on failure.
10400  */
10401 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10402 {
10403         struct Scsi_Host *host;
10404         struct ufs_hba *hba;
10405         int err = 0;
10406
10407         if (!dev) {
10408                 dev_err(dev,
10409                 "Invalid device reference: dev is NULL\n");
10410                 err = -ENODEV;
10411                 goto out_error;
10412         }
10413
10414         host = scsi_host_alloc(&ufshcd_driver_template,
10415                                 sizeof(struct ufs_hba));
10416         if (!host) {
10417                 dev_err(dev, "scsi_host_alloc failed\n");
10418                 err = -ENOMEM;
10419                 goto out_error;
10420         }
10421         host->nr_maps = HCTX_TYPE_POLL + 1;
10422         hba = shost_priv(host);
10423         hba->host = host;
10424         hba->dev = dev;
10425         hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10426         hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10427         ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10428         INIT_LIST_HEAD(&hba->clk_list_head);
10429         spin_lock_init(&hba->outstanding_lock);
10430
10431         *hba_handle = hba;
10432
10433 out_error:
10434         return err;
10435 }
10436 EXPORT_SYMBOL(ufshcd_alloc_host);
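/*
 * Illustrative sketch, not part of this driver: ufshcd_alloc_host() above
 * relies on the standard SCSI midlayer pattern of allocating the Scsi_Host
 * together with a driver-private area and retrieving that area with
 * shost_priv(). With a hypothetical private structure it looks roughly like
 * this (scsi_host_alloc(), shost_priv() and scsi_host_put() are the real
 * midlayer APIs):
 */
struct example_hba {
        struct Scsi_Host *host;
        struct device *dev;
};

static __maybe_unused struct example_hba *
example_alloc_hba(struct device *dev, const struct scsi_host_template *tmpl)
{
        struct Scsi_Host *host;
        struct example_hba *hba;

        /* Allocate the Scsi_Host plus sizeof(*hba) bytes of private data. */
        host = scsi_host_alloc(tmpl, sizeof(*hba));
        if (!host)
                return NULL;

        hba = shost_priv(host); /* points into the allocation above */
        hba->host = host;
        hba->dev = dev;
        return hba;             /* released later via scsi_host_put(host) */
}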
10437
10438 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10439 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10440                                      const struct blk_mq_queue_data *qd)
10441 {
10442         WARN_ON_ONCE(true);
10443         return BLK_STS_NOTSUPP;
10444 }
10445
10446 static const struct blk_mq_ops ufshcd_tmf_ops = {
10447         .queue_rq = ufshcd_queue_tmf,
10448 };
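/*
 * The ops above back a tag set that ufshcd_init() configures purely as an
 * allocator of unique tags, one per UTP Task Management Request List slot
 * (queue_depth = hba->nutmrs); no real block requests are ever queued through
 * it, which is why queue_rq is a stub that only warns if it is called.
 */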
10449
10450 /**
10451  * ufshcd_init - Driver initialization routine
10452  * @hba: per-adapter instance
10453  * @mmio_base: base register address
10454  * @irq: Interrupt line of device
10455  *
10456  * Return: 0 on success, non-zero value on failure.
10457  */
10458 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
10459 {
10460         int err;
10461         struct Scsi_Host *host = hba->host;
10462         struct device *dev = hba->dev;
10463         char eh_wq_name[sizeof("ufs_eh_wq_00")];
10464
10465         /*
10466          * dev_set_drvdata() must be called before any callbacks are registered
10467          * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10468          * sysfs).
10469          */
10470         dev_set_drvdata(dev, hba);
10471
10472         if (!mmio_base) {
10473                 dev_err(hba->dev,
10474                 "Invalid register space reference: mmio_base is NULL\n");
10475                 err = -ENODEV;
10476                 goto out_error;
10477         }
10478
10479         hba->mmio_base = mmio_base;
10480         hba->irq = irq;
10481         hba->vps = &ufs_hba_vps;
10482
10483         err = ufshcd_hba_init(hba);
10484         if (err)
10485                 goto out_error;
10486
10487         /* Read capabilities registers */
10488         err = ufshcd_hba_capabilities(hba);
10489         if (err)
10490                 goto out_disable;
10491
10492         /* Get UFS version supported by the controller */
10493         hba->ufs_version = ufshcd_get_ufs_version(hba);
10494
10495         /* Get Interrupt bit mask per version */
10496         hba->intr_mask = ufshcd_get_intr_mask(hba);
10497
10498         err = ufshcd_set_dma_mask(hba);
10499         if (err) {
10500                 dev_err(hba->dev, "set dma mask failed\n");
10501                 goto out_disable;
10502         }
10503
10504         /* Allocate memory for host memory space */
10505         err = ufshcd_memory_alloc(hba);
10506         if (err) {
10507                 dev_err(hba->dev, "Memory allocation failed\n");
10508                 goto out_disable;
10509         }
10510
10511         /* Configure LRB */
10512         ufshcd_host_memory_configure(hba);
10513
10514         host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
10515         host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
10516         host->max_id = UFSHCD_MAX_ID;
10517         host->max_lun = UFS_MAX_LUNS;
10518         host->max_channel = UFSHCD_MAX_CHANNEL;
10519         host->unique_id = host->host_no;
10520         host->max_cmd_len = UFS_CDB_SIZE;
10521         host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
10522
10523         hba->max_pwr_info.is_valid = false;
10524
10525         /* Initialize work queues */
10526         snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
10527                  hba->host->host_no);
10528         hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
10529         if (!hba->eh_wq) {
10530                 dev_err(hba->dev, "%s: failed to create eh workqueue\n",
10531                         __func__);
10532                 err = -ENOMEM;
10533                 goto out_disable;
10534         }
10535         INIT_WORK(&hba->eh_work, ufshcd_err_handler);
10536         INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
10537
10538         sema_init(&hba->host_sem, 1);
10539
10540         /* Initialize UIC command mutex */
10541         mutex_init(&hba->uic_cmd_mutex);
10542
10543         /* Initialize mutex for device management commands */
10544         mutex_init(&hba->dev_cmd.lock);
10545
10546         /* Initialize mutex for exception event control */
10547         mutex_init(&hba->ee_ctrl_mutex);
10548
10549         mutex_init(&hba->wb_mutex);
10550         init_rwsem(&hba->clk_scaling_lock);
10551
10552         ufshcd_init_clk_gating(hba);
10553
10554         ufshcd_init_clk_scaling(hba);
10555
10556         /*
10557          * In order to avoid any spurious interrupt immediately after
10558          * registering the UFS controller interrupt handler, clear any pending UFS
10559          * interrupt status and disable all the UFS interrupts.
10560          */
10561         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
10562                       REG_INTERRUPT_STATUS);
10563         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
10564         /*
10565          * Make sure that UFS interrupts are disabled and any pending interrupt
10566          * status is cleared before registering the UFS interrupt handler.
10567          */
10568         mb();
10569
10570         /* IRQ registration */
10571         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
10572         if (err) {
10573                 dev_err(hba->dev, "request irq failed\n");
10574                 goto out_disable;
10575         } else {
10576                 hba->is_irq_enabled = true;
10577         }
10578
10579         if (!is_mcq_supported(hba)) {
10580                 err = scsi_add_host(host, hba->dev);
10581                 if (err) {
10582                         dev_err(hba->dev, "scsi_add_host failed\n");
10583                         goto out_disable;
10584                 }
10585         }
10586
10587         hba->tmf_tag_set = (struct blk_mq_tag_set) {
10588                 .nr_hw_queues   = 1,
10589                 .queue_depth    = hba->nutmrs,
10590                 .ops            = &ufshcd_tmf_ops,
10591                 .flags          = BLK_MQ_F_NO_SCHED,
10592         };
10593         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
10594         if (err < 0)
10595                 goto out_remove_scsi_host;
10596         hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
10597         if (IS_ERR(hba->tmf_queue)) {
10598                 err = PTR_ERR(hba->tmf_queue);
10599                 goto free_tmf_tag_set;
10600         }
10601         hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
10602                                     sizeof(*hba->tmf_rqs), GFP_KERNEL);
10603         if (!hba->tmf_rqs) {
10604                 err = -ENOMEM;
10605                 goto free_tmf_queue;
10606         }
10607
10608         /* Reset the attached device */
10609         ufshcd_device_reset(hba);
10610
10611         ufshcd_init_crypto(hba);
10612
10613         /* Host controller enable */
10614         err = ufshcd_hba_enable(hba);
10615         if (err) {
10616                 dev_err(hba->dev, "Host controller enable failed\n");
10617                 ufshcd_print_evt_hist(hba);
10618                 ufshcd_print_host_state(hba);
10619                 goto free_tmf_queue;
10620         }
10621
10622         /*
10623          * Set the default power management level for runtime and system PM.
10624          * Default power saving mode is to keep UFS link in Hibern8 state
10625          * and UFS device in sleep state.
10626          */
10627         hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10628                                                 UFS_SLEEP_PWR_MODE,
10629                                                 UIC_LINK_HIBERN8_STATE);
10630         hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
10631                                                 UFS_SLEEP_PWR_MODE,
10632                                                 UIC_LINK_HIBERN8_STATE);
10633
10634         INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
10635         INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
10636
10637         /* Set the default auto-hibernate idle timer value to 150 ms */
10638         if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
10639                 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
10640                             FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
10641         }
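        /*
         * The UFSHCI auto-hibernate register encodes a timer value plus a
         * scale factor; a scale selection of 3 corresponds to 1 ms units in
         * the UFSHCI spec, so a timer value of 150 gives the 150 ms default
         * set above.
         */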
10642
10643         /* Hold auto suspend until async scan completes */
10644         pm_runtime_get_sync(dev);
10645         atomic_set(&hba->scsi_block_reqs_cnt, 0);
10646         /*
10647          * We assume that the device was not put into the sleep/power-down
10648          * state by the boot stage, i.e. before the kernel started.
10649          * This assumption helps avoid doing link startup twice during
10650          * ufshcd_probe_hba().
10651          */
10652         ufshcd_set_ufs_dev_active(hba);
10653
10654         async_schedule(ufshcd_async_scan, hba);
10655         ufs_sysfs_add_nodes(hba->dev);
10656
10657         device_enable_async_suspend(dev);
10658         return 0;
10659
10660 free_tmf_queue:
10661         blk_mq_destroy_queue(hba->tmf_queue);
10662         blk_put_queue(hba->tmf_queue);
10663 free_tmf_tag_set:
10664         blk_mq_free_tag_set(&hba->tmf_tag_set);
10665 out_remove_scsi_host:
10666         scsi_remove_host(hba->host);
10667 out_disable:
10668         hba->is_irq_enabled = false;
10669         ufshcd_hba_exit(hba);
10670 out_error:
10671         return err;
10672 }
10673 EXPORT_SYMBOL_GPL(ufshcd_init);
10674
10675 void ufshcd_resume_complete(struct device *dev)
10676 {
10677         struct ufs_hba *hba = dev_get_drvdata(dev);
10678
10679         if (hba->complete_put) {
10680                 ufshcd_rpm_put(hba);
10681                 hba->complete_put = false;
10682         }
10683 }
10684 EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
10685
10686 static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
10687 {
10688         struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
10689         enum ufs_dev_pwr_mode dev_pwr_mode;
10690         enum uic_link_state link_state;
10691         unsigned long flags;
10692         bool res;
10693
10694         spin_lock_irqsave(&dev->power.lock, flags);
10695         dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
10696         link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
10697         res = pm_runtime_suspended(dev) &&
10698               hba->curr_dev_pwr_mode == dev_pwr_mode &&
10699               hba->uic_link_state == link_state &&
10700               !hba->dev_info.b_rpm_dev_flush_capable;
10701         spin_unlock_irqrestore(&dev->power.lock, flags);
10702
10703         return res;
10704 }
10705
10706 int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
10707 {
10708         struct ufs_hba *hba = dev_get_drvdata(dev);
10709         int ret;
10710
10711         /*
10712          * SCSI assumes that runtime PM and system PM for SCSI drivers are
10713          * the same, so it does not wake up a runtime-suspended device for
10714          * system suspend. UFS does not follow that model; see
10715          * ufshcd_resume_complete().
10716          */
10717         if (hba->ufs_device_wlun) {
10718                 /* Prevent runtime suspend */
10719                 ufshcd_rpm_get_noresume(hba);
10720                 /*
10721                  * Check if already runtime suspended in same state as system
10722                  * suspend would be.
10723                  */
10724                 if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
10725                         /* RPM state is not ok for SPM, so runtime resume */
10726                         ret = ufshcd_rpm_resume(hba);
10727                         if (ret < 0 && ret != -EACCES) {
10728                                 ufshcd_rpm_put(hba);
10729                                 return ret;
10730                         }
10731                 }
10732                 hba->complete_put = true;
10733         }
10734         return 0;
10735 }
10736 EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
10737
10738 int ufshcd_suspend_prepare(struct device *dev)
10739 {
10740         return __ufshcd_suspend_prepare(dev, true);
10741 }
10742 EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
10743
10744 #ifdef CONFIG_PM_SLEEP
10745 static int ufshcd_wl_poweroff(struct device *dev)
10746 {
10747         struct scsi_device *sdev = to_scsi_device(dev);
10748         struct ufs_hba *hba = shost_priv(sdev->host);
10749
10750         __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10751         return 0;
10752 }
10753 #endif
10754
10755 static int ufshcd_wl_probe(struct device *dev)
10756 {
10757         struct scsi_device *sdev = to_scsi_device(dev);
10758
10759         if (!is_device_wlun(sdev))
10760                 return -ENODEV;
10761
10762         blk_pm_runtime_init(sdev->request_queue, dev);
10763         pm_runtime_set_autosuspend_delay(dev, 0);
10764         pm_runtime_allow(dev);
10765
10766         return 0;
10767 }
10768
10769 static int ufshcd_wl_remove(struct device *dev)
10770 {
10771         pm_runtime_forbid(dev);
10772         return 0;
10773 }
10774
10775 static const struct dev_pm_ops ufshcd_wl_pm_ops = {
10776 #ifdef CONFIG_PM_SLEEP
10777         .suspend = ufshcd_wl_suspend,
10778         .resume = ufshcd_wl_resume,
10779         .freeze = ufshcd_wl_suspend,
10780         .thaw = ufshcd_wl_resume,
10781         .poweroff = ufshcd_wl_poweroff,
10782         .restore = ufshcd_wl_resume,
10783 #endif
10784         SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
10785 };
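/*
 * Only the system sleep callbacks above need the explicit #ifdef:
 * SET_RUNTIME_PM_OPS() already expands to nothing when CONFIG_PM is disabled.
 */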
10786
10787 static void ufshcd_check_header_layout(void)
10788 {
10789         /*
10790          * gcc compilers before version 10 cannot do constant-folding for
10791          * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
10792          * before.
10793          */
10794         if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
10795                 return;
10796
10797         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10798                                 .cci = 3})[0] != 3);
10799
10800         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10801                                 .ehs_length = 2})[1] != 2);
10802
10803         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10804                                 .enable_crypto = 1})[2]
10805                      != 0x80);
10806
10807         BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
10808                                         .command_type = 5,
10809                                         .data_direction = 3,
10810                                         .interrupt = 1,
10811                                 })[3]) != ((5 << 4) | (3 << 1) | 1));
10812
10813         BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10814                                 .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
10815                 cpu_to_le32(0xdeadbeef));
10816
10817         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10818                                 .ocs = 4})[8] != 4);
10819
10820         BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
10821                                 .cds = 5})[9] != 5);
10822
10823         BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
10824                                 .dunu = cpu_to_le32(0xbadcafe)})[3] !=
10825                 cpu_to_le32(0xbadcafe));
10826
10827         BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10828                              .iid = 0xf })[4] != 0xf0);
10829
10830         BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
10831                              .command_set_type = 0xf })[4] != 0xf);
10832 }
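/*
 * Illustrative sketch, not part of this driver: ufshcd_check_header_layout()
 * above pins down the descriptor bitfield layout at compile time by building
 * a compound literal with a single field set and asserting which byte (and
 * which bits of that byte) the value lands in. The same technique applied to
 * a hypothetical two-byte header, assuming a little-endian ABI where the
 * first declared bitfield occupies the least significant bits, and subject to
 * the same "gcc >= 10 constant folding" caveat noted above:
 */
static void __maybe_unused example_check_layout(void)
{
        struct example_hdr {
                u8 type:4;      /* expected in bits 3:0 of byte 0 */
                u8 flags:4;     /* expected in bits 7:4 of byte 0 */
                u8 len;         /* expected in byte 1 */
        };

        BUILD_BUG_ON(((u8 *)&(struct example_hdr){ .type = 0x5 })[0] != 0x05);
        BUILD_BUG_ON(((u8 *)&(struct example_hdr){ .flags = 0xf })[0] != 0xf0);
        BUILD_BUG_ON(((u8 *)&(struct example_hdr){ .len = 7 })[1] != 7);
}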
10833
10834 /*
10835  * ufs_dev_wlun_template - describes the UFS device wlun
10836  * ufs-device wlun - used to send PM commands
10837  * All LUNs are consumers of the ufs-device wlun.
10838  *
10839  * Currently, no sd driver is present for wluns,
10840  * hence no specific PM operations are performed through it.
10841  * Per the UFS design, SSU should be sent to the ufs-device wlun,
10842  * hence a SCSI driver is registered for UFS wluns only.
10843  */
10844 static struct scsi_driver ufs_dev_wlun_template = {
10845         .gendrv = {
10846                 .name = "ufs_device_wlun",
10847                 .owner = THIS_MODULE,
10848                 .probe = ufshcd_wl_probe,
10849                 .remove = ufshcd_wl_remove,
10850                 .pm = &ufshcd_wl_pm_ops,
10851                 .shutdown = ufshcd_wl_shutdown,
10852         },
10853 };
10854
10855 static int __init ufshcd_core_init(void)
10856 {
10857         int ret;
10858
10859         ufshcd_check_header_layout();
10860
10861         ufs_debugfs_init();
10862
10863         ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
10864         if (ret)
10865                 ufs_debugfs_exit();
10866         return ret;
10867 }
10868
10869 static void __exit ufshcd_core_exit(void)
10870 {
10871         ufs_debugfs_exit();
10872         scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
10873 }
10874
10875 module_init(ufshcd_core_init);
10876 module_exit(ufshcd_core_exit);
10877
10878 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10879 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10880 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10881 MODULE_SOFTDEP("pre: governor_simpleondemand");
10882 MODULE_LICENSE("GPL");