1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15
16 #include <linux/delay.h>
17 #include <linux/ktime.h>
18 #include <linux/highmem.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/sizes.h>
25 #include <linux/swiotlb.h>
26 #include <linux/regulator/consumer.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/of.h>
29
30 #include <linux/leds.h>
31
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/card.h>
35 #include <linux/mmc/sdio.h>
36 #include <linux/mmc/slot-gpio.h>
37
38 #include "sdhci.h"
39
40 #define DRIVER_NAME "sdhci"
41
42 #define DBG(f, x...) \
43         pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44
45 #define SDHCI_DUMP(f, x...) \
46         pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
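/*
 * Both macros prefix the message with the host name and rely on a
 * 'host' variable being in scope at the call site; e.g. (illustrative)
 * DBG("clock %uHz\n", clock) would log "mmc0: sdhci: clock 400000Hz".
 */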
47
48 #define MAX_TUNING_LOOP 40
49
50 static unsigned int debug_quirks = 0;
51 static unsigned int debug_quirks2;
52
53 static void sdhci_finish_data(struct sdhci_host *);
54
55 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56
57 void sdhci_dumpregs(struct sdhci_host *host)
58 {
59         SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
60
61         SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
62                    sdhci_readl(host, SDHCI_DMA_ADDRESS),
63                    sdhci_readw(host, SDHCI_HOST_VERSION));
64         SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
65                    sdhci_readw(host, SDHCI_BLOCK_SIZE),
66                    sdhci_readw(host, SDHCI_BLOCK_COUNT));
67         SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
68                    sdhci_readl(host, SDHCI_ARGUMENT),
69                    sdhci_readw(host, SDHCI_TRANSFER_MODE));
70         SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
71                    sdhci_readl(host, SDHCI_PRESENT_STATE),
72                    sdhci_readb(host, SDHCI_HOST_CONTROL));
73         SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
74                    sdhci_readb(host, SDHCI_POWER_CONTROL),
75                    sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
76         SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
77                    sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
78                    sdhci_readw(host, SDHCI_CLOCK_CONTROL));
79         SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
80                    sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
81                    sdhci_readl(host, SDHCI_INT_STATUS));
82         SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
83                    sdhci_readl(host, SDHCI_INT_ENABLE),
84                    sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
85         SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
86                    sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
87                    sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
88         SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
89                    sdhci_readl(host, SDHCI_CAPABILITIES),
90                    sdhci_readl(host, SDHCI_CAPABILITIES_1));
91         SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
92                    sdhci_readw(host, SDHCI_COMMAND),
93                    sdhci_readl(host, SDHCI_MAX_CURRENT));
94         SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
95                    sdhci_readl(host, SDHCI_RESPONSE),
96                    sdhci_readl(host, SDHCI_RESPONSE + 4));
97         SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
98                    sdhci_readl(host, SDHCI_RESPONSE + 8),
99                    sdhci_readl(host, SDHCI_RESPONSE + 12));
100         SDHCI_DUMP("Host ctl2: 0x%08x\n",
101                    sdhci_readw(host, SDHCI_HOST_CONTROL2));
102
103         if (host->flags & SDHCI_USE_ADMA) {
104                 if (host->flags & SDHCI_USE_64_BIT_DMA) {
105                         SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
106                                    sdhci_readl(host, SDHCI_ADMA_ERROR),
107                                    sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
108                                    sdhci_readl(host, SDHCI_ADMA_ADDRESS));
109                 } else {
110                         SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
111                                    sdhci_readl(host, SDHCI_ADMA_ERROR),
112                                    sdhci_readl(host, SDHCI_ADMA_ADDRESS));
113                 }
114         }
115
116         SDHCI_DUMP("============================================\n");
117 }
118 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119
120 /*****************************************************************************\
121  *                                                                           *
122  * Low level functions                                                       *
123  *                                                                           *
124 \*****************************************************************************/
125
126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127 {
128         u16 ctrl2;
129
130         ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131         if (ctrl2 & SDHCI_CTRL_V4_MODE)
132                 return;
133
134         ctrl2 |= SDHCI_CTRL_V4_MODE;
135         sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136 }
137
138 /*
139  * This can be called before sdhci_add_host() by a vendor's host controller
140  * driver to enable v4 mode if supported.
141  */
142 void sdhci_enable_v4_mode(struct sdhci_host *host)
143 {
144         host->v4_mode = true;
145         sdhci_do_enable_v4_mode(host);
146 }
147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
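/*
 * Illustrative (hypothetical) vendor probe sketch -- my_sdhci_probe and
 * my_pdata are made up, but sdhci_pltfm_init()/sdhci_add_host() are the
 * usual entry points:
 *
 *	static int my_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *
 *		host = sdhci_pltfm_init(pdev, &my_pdata, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *		sdhci_enable_v4_mode(host);
 *		return sdhci_add_host(host);
 *	}
 */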
148
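/* A command occupies the data lines if it transfers data or signals busy (R1b) on DAT0. */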
149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
150 {
151         return cmd->data || (cmd->flags & MMC_RSP_BUSY);
152 }
153
154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
155 {
156         u32 present;
157
158         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
159             !mmc_card_is_removable(host->mmc))
160                 return;
161
162         if (enable) {
163                 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
164                                       SDHCI_CARD_PRESENT;
165
166                 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
167                                        SDHCI_INT_CARD_INSERT;
168         } else {
169                 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
170         }
171
172         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
173         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
174 }
175
176 static void sdhci_enable_card_detection(struct sdhci_host *host)
177 {
178         sdhci_set_card_detection(host, true);
179 }
180
181 static void sdhci_disable_card_detection(struct sdhci_host *host)
182 {
183         sdhci_set_card_detection(host, false);
184 }
185
186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
187 {
188         if (host->bus_on)
189                 return;
190         host->bus_on = true;
191         pm_runtime_get_noresume(host->mmc->parent);
192 }
193
194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
195 {
196         if (!host->bus_on)
197                 return;
198         host->bus_on = false;
199         pm_runtime_put_noidle(host->mmc->parent);
200 }
201
202 void sdhci_reset(struct sdhci_host *host, u8 mask)
203 {
204         ktime_t timeout;
205
206         sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
207
208         if (mask & SDHCI_RESET_ALL) {
209                 host->clock = 0;
210                 /* Reset-all turns off SD Bus Power */
211                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
212                         sdhci_runtime_pm_bus_off(host);
213         }
214
215         /* Wait max 100 ms */
216         timeout = ktime_add_ms(ktime_get(), 100);
217
218         /* hw clears the bit when it's done */
219         while (1) {
220                 bool timedout = ktime_after(ktime_get(), timeout);
221
222                 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223                         break;
224                 if (timedout) {
225                         pr_err("%s: Reset 0x%x never completed.\n",
226                                 mmc_hostname(host->mmc), (int)mask);
227                         sdhci_dumpregs(host);
228                         return;
229                 }
230                 udelay(10);
231         }
232 }
233 EXPORT_SYMBOL_GPL(sdhci_reset);
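/*
 * A minimal (hypothetical) sketch of a vendor driver wrapping the generic
 * reset via the sdhci_ops reset hook called from sdhci_do_reset() below;
 * my_restore_vendor_regs() is a made-up helper:
 *
 *	static void my_reset(struct sdhci_host *host, u8 mask)
 *	{
 *		sdhci_reset(host, mask);
 *		if (mask & SDHCI_RESET_ALL)
 *			my_restore_vendor_regs(host);
 *	}
 *
 *	static const struct sdhci_ops my_ops = {
 *		.reset = my_reset,
 *	};
 */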
234
235 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
236 {
237         if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
238                 struct mmc_host *mmc = host->mmc;
239
240                 if (!mmc->ops->get_cd(mmc))
241                         return;
242         }
243
244         host->ops->reset(host, mask);
245
246         if (mask & SDHCI_RESET_ALL) {
247                 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
248                         if (host->ops->enable_dma)
249                                 host->ops->enable_dma(host);
250                 }
251
252                 /* Resetting the controller clears many settings */
253                 host->preset_enabled = false;
254         }
255 }
256
257 static void sdhci_set_default_irqs(struct sdhci_host *host)
258 {
259         host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
260                     SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
261                     SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
262                     SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
263                     SDHCI_INT_RESPONSE;
264
265         if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
266             host->tuning_mode == SDHCI_TUNING_MODE_3)
267                 host->ier |= SDHCI_INT_RETUNE;
268
269         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
270         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
271 }
272
273 static void sdhci_config_dma(struct sdhci_host *host)
274 {
275         u8 ctrl;
276         u16 ctrl2;
277
278         if (host->version < SDHCI_SPEC_200)
279                 return;
280
281         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
282
283         /*
284          * Always adjust the DMA selection as some controllers
285          * (e.g. JMicron) can't do PIO properly when the selection
286          * is ADMA.
287          */
288         ctrl &= ~SDHCI_CTRL_DMA_MASK;
289         if (!(host->flags & SDHCI_REQ_USE_DMA))
290                 goto out;
291
292         /* Note: if DMA Select is zero, SDMA is selected */
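        /*
         * HOST_CONTROL[4:3] encoding (assuming the standard sdhci.h values):
         * 00b = SDMA, 10b = 32-bit ADMA2, 11b = 64-bit ADMA2 (pre-v4 hosts).
         */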
293         if (host->flags & SDHCI_USE_ADMA)
294                 ctrl |= SDHCI_CTRL_ADMA32;
295
296         if (host->flags & SDHCI_USE_64_BIT_DMA) {
297                  * In v4 mode, all DMA modes support 64-bit addressing if the
298                  * controller supports a 64-bit system address; otherwise only
299                  * ADMA supports 64-bit addressing.
300                  * ADMA can support 64-bit addressing.
301                  */
302                 if (host->v4_mode) {
303                         ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
304                         ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
305                         sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
306                 } else if (host->flags & SDHCI_USE_ADMA) {
307                         /*
308                          * Don't need to undo SDHCI_CTRL_ADMA32 in order to
309                          * set SDHCI_CTRL_ADMA64.
310                          */
311                         ctrl |= SDHCI_CTRL_ADMA64;
312                 }
313         }
314
315 out:
316         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
317 }
318
319 static void sdhci_init(struct sdhci_host *host, int soft)
320 {
321         struct mmc_host *mmc = host->mmc;
322
323         if (soft)
324                 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
325         else
326                 sdhci_do_reset(host, SDHCI_RESET_ALL);
327
328         if (host->v4_mode)
329                 sdhci_do_enable_v4_mode(host);
330
331         sdhci_set_default_irqs(host);
332
333         host->cqe_on = false;
334
335         if (soft) {
336                 /* force clock reconfiguration */
337                 host->clock = 0;
338                 mmc->ops->set_ios(mmc, &mmc->ios);
339         }
340 }
341
342 static void sdhci_reinit(struct sdhci_host *host)
343 {
344         sdhci_init(host, 0);
345         sdhci_enable_card_detection(host);
346 }
347
348 static void __sdhci_led_activate(struct sdhci_host *host)
349 {
350         u8 ctrl;
351
352         if (host->quirks & SDHCI_QUIRK_NO_LED)
353                 return;
354
355         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
356         ctrl |= SDHCI_CTRL_LED;
357         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
358 }
359
360 static void __sdhci_led_deactivate(struct sdhci_host *host)
361 {
362         u8 ctrl;
363
364         if (host->quirks & SDHCI_QUIRK_NO_LED)
365                 return;
366
367         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
368         ctrl &= ~SDHCI_CTRL_LED;
369         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
370 }
371
372 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
373 static void sdhci_led_control(struct led_classdev *led,
374                               enum led_brightness brightness)
375 {
376         struct sdhci_host *host = container_of(led, struct sdhci_host, led);
377         unsigned long flags;
378
379         spin_lock_irqsave(&host->lock, flags);
380
381         if (host->runtime_suspended)
382                 goto out;
383
384         if (brightness == LED_OFF)
385                 __sdhci_led_deactivate(host);
386         else
387                 __sdhci_led_activate(host);
388 out:
389         spin_unlock_irqrestore(&host->lock, flags);
390 }
391
392 static int sdhci_led_register(struct sdhci_host *host)
393 {
394         struct mmc_host *mmc = host->mmc;
395
396         if (host->quirks & SDHCI_QUIRK_NO_LED)
397                 return 0;
398
399         snprintf(host->led_name, sizeof(host->led_name),
400                  "%s::", mmc_hostname(mmc));
401
402         host->led.name = host->led_name;
403         host->led.brightness = LED_OFF;
404         host->led.default_trigger = mmc_hostname(mmc);
405         host->led.brightness_set = sdhci_led_control;
406
407         return led_classdev_register(mmc_dev(mmc), &host->led);
408 }
409
410 static void sdhci_led_unregister(struct sdhci_host *host)
411 {
412         if (host->quirks & SDHCI_QUIRK_NO_LED)
413                 return;
414
415         led_classdev_unregister(&host->led);
416 }
417
418 static inline void sdhci_led_activate(struct sdhci_host *host)
419 {
420 }
421
422 static inline void sdhci_led_deactivate(struct sdhci_host *host)
423 {
424 }
425
426 #else
427
428 static inline int sdhci_led_register(struct sdhci_host *host)
429 {
430         return 0;
431 }
432
433 static inline void sdhci_led_unregister(struct sdhci_host *host)
434 {
435 }
436
437 static inline void sdhci_led_activate(struct sdhci_host *host)
438 {
439         __sdhci_led_activate(host);
440 }
441
442 static inline void sdhci_led_deactivate(struct sdhci_host *host)
443 {
444         __sdhci_led_deactivate(host);
445 }
446
447 #endif
448
449 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
450                             unsigned long timeout)
451 {
452         if (sdhci_data_line_cmd(mrq->cmd))
453                 mod_timer(&host->data_timer, timeout);
454         else
455                 mod_timer(&host->timer, timeout);
456 }
457
458 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
459 {
460         if (sdhci_data_line_cmd(mrq->cmd))
461                 del_timer(&host->data_timer);
462         else
463                 del_timer(&host->timer);
464 }
465
466 static inline bool sdhci_has_requests(struct sdhci_host *host)
467 {
468         return host->cmd || host->data_cmd;
469 }
470
471 /*****************************************************************************\
472  *                                                                           *
473  * Core functions                                                            *
474  *                                                                           *
475 \*****************************************************************************/
476
477 static void sdhci_read_block_pio(struct sdhci_host *host)
478 {
479         unsigned long flags;
480         size_t blksize, len, chunk;
481         u32 uninitialized_var(scratch);
482         u8 *buf;
483
484         DBG("PIO reading\n");
485
486         blksize = host->data->blksz;
487         chunk = 0;
488
489         local_irq_save(flags);
490
491         while (blksize) {
492                 BUG_ON(!sg_miter_next(&host->sg_miter));
493
494                 len = min(host->sg_miter.length, blksize);
495
496                 blksize -= len;
497                 host->sg_miter.consumed = len;
498
499                 buf = host->sg_miter.addr;
500
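                /*
                 * The BUFFER register is read 32 bits at a time: 'scratch'
                 * holds the most recent word and 'chunk' counts how many of
                 * its bytes have not yet been copied out.
                 */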
501                 while (len) {
502                         if (chunk == 0) {
503                                 scratch = sdhci_readl(host, SDHCI_BUFFER);
504                                 chunk = 4;
505                         }
506
507                         *buf = scratch & 0xFF;
508
509                         buf++;
510                         scratch >>= 8;
511                         chunk--;
512                         len--;
513                 }
514         }
515
516         sg_miter_stop(&host->sg_miter);
517
518         local_irq_restore(flags);
519 }
520
521 static void sdhci_write_block_pio(struct sdhci_host *host)
522 {
523         unsigned long flags;
524         size_t blksize, len, chunk;
525         u32 scratch;
526         u8 *buf;
527
528         DBG("PIO writing\n");
529
530         blksize = host->data->blksz;
531         chunk = 0;
532         scratch = 0;
533
534         local_irq_save(flags);
535
536         while (blksize) {
537                 BUG_ON(!sg_miter_next(&host->sg_miter));
538
539                 len = min(host->sg_miter.length, blksize);
540
541                 blksize -= len;
542                 host->sg_miter.consumed = len;
543
544                 buf = host->sg_miter.addr;
545
546                 while (len) {
547                         scratch |= (u32)*buf << (chunk * 8);
548
549                         buf++;
550                         chunk++;
551                         len--;
552
553                         if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
554                                 sdhci_writel(host, scratch, SDHCI_BUFFER);
555                                 chunk = 0;
556                                 scratch = 0;
557                         }
558                 }
559         }
560
561         sg_miter_stop(&host->sg_miter);
562
563         local_irq_restore(flags);
564 }
565
566 static void sdhci_transfer_pio(struct sdhci_host *host)
567 {
568         u32 mask;
569
570         if (host->blocks == 0)
571                 return;
572
573         if (host->data->flags & MMC_DATA_READ)
574                 mask = SDHCI_DATA_AVAILABLE;
575         else
576                 mask = SDHCI_SPACE_AVAILABLE;
577
578         /*
579          * Some controllers (JMicron JMB38x) mess up the buffer bits
580          * for transfers < 4 bytes. As long as it is just one block,
581          * we can ignore the bits.
582          */
583         if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
584                 (host->data->blocks == 1))
585                 mask = ~0;
586
587         while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
588                 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
589                         udelay(100);
590
591                 if (host->data->flags & MMC_DATA_READ)
592                         sdhci_read_block_pio(host);
593                 else
594                         sdhci_write_block_pio(host);
595
596                 host->blocks--;
597                 if (host->blocks == 0)
598                         break;
599         }
600
601         DBG("PIO transfer complete.\n");
602 }
603
604 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
605                                   struct mmc_data *data, int cookie)
606 {
607         int sg_count;
608
609         /*
610          * If the data buffers are already mapped, return the previous
611          * dma_map_sg() result.
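         * COOKIE_PRE_MAPPED indicates the request was mapped ahead of
         * time, e.g. from the host's ->pre_req() hook.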
612          */
613         if (data->host_cookie == COOKIE_PRE_MAPPED)
614                 return data->sg_count;
615
616         /* Bounce write requests to the bounce buffer */
617         if (host->bounce_buffer) {
618                 unsigned int length = data->blksz * data->blocks;
619
620                 if (length > host->bounce_buffer_size) {
621                         pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
622                                mmc_hostname(host->mmc), length,
623                                host->bounce_buffer_size);
624                         return -EIO;
625                 }
626                 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
627                         /* Copy the data to the bounce buffer */
628                         sg_copy_to_buffer(data->sg, data->sg_len,
629                                           host->bounce_buffer,
630                                           length);
631                 }
632                 /* Switch ownership to the DMA */
633                 dma_sync_single_for_device(host->mmc->parent,
634                                            host->bounce_addr,
635                                            host->bounce_buffer_size,
636                                            mmc_get_dma_dir(data));
637                 /* Just a dummy value */
638                 sg_count = 1;
639         } else {
640                 /* Just access the data directly from memory */
641                 sg_count = dma_map_sg(mmc_dev(host->mmc),
642                                       data->sg, data->sg_len,
643                                       mmc_get_dma_dir(data));
644         }
645
646         if (sg_count == 0)
647                 return -ENOSPC;
648
649         data->sg_count = sg_count;
650         data->host_cookie = cookie;
651
652         return sg_count;
653 }
654
655 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
656 {
657         local_irq_save(*flags);
658         return kmap_atomic(sg_page(sg)) + sg->offset;
659 }
660
661 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
662 {
663         kunmap_atomic(buffer);
664         local_irq_restore(*flags);
665 }
666
667 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
668                            dma_addr_t addr, int len, unsigned int cmd)
669 {
670         struct sdhci_adma2_64_desc *dma_desc = *desc;
671
672         /* 32-bit and 64-bit descriptors have these members in same position */
673         dma_desc->cmd = cpu_to_le16(cmd);
674         dma_desc->len = cpu_to_le16(len);
675         dma_desc->addr_lo = cpu_to_le32((u32)addr);
676
677         if (host->flags & SDHCI_USE_64_BIT_DMA)
678                 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
679
680         *desc += host->desc_sz;
681 }
682 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
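/*
 * For illustration (field values assumed from the ADMA2 format in sdhci.h):
 * a 512-byte "tran, valid" descriptor for address 0x12345678 would be
 * filled in as
 *
 *	cmd     = cpu_to_le16(ADMA2_TRAN_VALID);
 *	len     = cpu_to_le16(512);
 *	addr_lo = cpu_to_le32(0x12345678);
 *	addr_hi = cpu_to_le32(0);	(64-bit descriptors only)
 */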
683
684 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
685                                            void **desc, dma_addr_t addr,
686                                            int len, unsigned int cmd)
687 {
688         if (host->ops->adma_write_desc)
689                 host->ops->adma_write_desc(host, desc, addr, len, cmd);
690         else
691                 sdhci_adma_write_desc(host, desc, addr, len, cmd);
692 }
693
694 static void sdhci_adma_mark_end(void *desc)
695 {
696         struct sdhci_adma2_64_desc *dma_desc = desc;
697
698         /* 32-bit and 64-bit descriptors have 'cmd' in same position */
699         dma_desc->cmd |= cpu_to_le16(ADMA2_END);
700 }
701
702 static void sdhci_adma_table_pre(struct sdhci_host *host,
703         struct mmc_data *data, int sg_count)
704 {
705         struct scatterlist *sg;
706         unsigned long flags;
707         dma_addr_t addr, align_addr;
708         void *desc, *align;
709         char *buffer;
710         int len, offset, i;
711
712         /*
713          * The spec does not specify the endianness of the descriptor table;
714          * we currently assume it is little-endian.
715          */
716
717         host->sg_count = sg_count;
718
719         desc = host->adma_table;
720         align = host->align_buffer;
721
722         align_addr = host->align_addr;
723
724         for_each_sg(data->sg, sg, host->sg_count, i) {
725                 addr = sg_dma_address(sg);
726                 len = sg_dma_len(sg);
727
728                 /*
729                  * The SDHCI specification states that ADMA addresses must
730                  * be 32-bit aligned. If they aren't, then we use a bounce
731                  * buffer for the (up to three) bytes that screw up the
732                  * alignment.
733                  */
734                 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
735                          SDHCI_ADMA2_MASK;
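                /*
                 * Illustrative, assuming SDHCI_ADMA2_ALIGN == 4: for
                 * addr == 0x1001, offset == (4 - 1) & 3 == 3, so three
                 * bytes are routed through the align buffer first.
                 */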
736                 if (offset) {
737                         if (data->flags & MMC_DATA_WRITE) {
738                                 buffer = sdhci_kmap_atomic(sg, &flags);
739                                 memcpy(align, buffer, offset);
740                                 sdhci_kunmap_atomic(buffer, &flags);
741                         }
742
743                         /* tran, valid */
744                         __sdhci_adma_write_desc(host, &desc, align_addr,
745                                                 offset, ADMA2_TRAN_VALID);
746
747                         BUG_ON(offset > 65536);
748
749                         align += SDHCI_ADMA2_ALIGN;
750                         align_addr += SDHCI_ADMA2_ALIGN;
751
752                         addr += offset;
753                         len -= offset;
754                 }
755
756                 BUG_ON(len > 65536);
757
758                 /* tran, valid */
759                 if (len)
760                         __sdhci_adma_write_desc(host, &desc, addr, len,
761                                                 ADMA2_TRAN_VALID);
762
763                 /*
764                  * If this triggers then we have a calculation bug
765                  * somewhere. :/
766                  */
767                 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
768         }
769
770         if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
771                 /* Mark the last descriptor as the terminating descriptor */
772                 if (desc != host->adma_table) {
773                         desc -= host->desc_sz;
774                         sdhci_adma_mark_end(desc);
775                 }
776         } else {
777                 /* Add a terminating entry - nop, end, valid */
778                 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
779         }
780 }
781
782 static void sdhci_adma_table_post(struct sdhci_host *host,
783         struct mmc_data *data)
784 {
785         struct scatterlist *sg;
786         int i, size;
787         void *align;
788         char *buffer;
789         unsigned long flags;
790
791         if (data->flags & MMC_DATA_READ) {
792                 bool has_unaligned = false;
793
794                 /* Do a quick scan of the SG list for any unaligned mappings */
795                 for_each_sg(data->sg, sg, host->sg_count, i)
796                         if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
797                                 has_unaligned = true;
798                                 break;
799                         }
800
801                 if (has_unaligned) {
802                         dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
803                                             data->sg_len, DMA_FROM_DEVICE);
804
805                         align = host->align_buffer;
806
807                         for_each_sg(data->sg, sg, host->sg_count, i) {
808                                 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
809                                         size = SDHCI_ADMA2_ALIGN -
810                                                (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
811
812                                         buffer = sdhci_kmap_atomic(sg, &flags);
813                                         memcpy(buffer, align, size);
814                                         sdhci_kunmap_atomic(buffer, &flags);
815
816                                         align += SDHCI_ADMA2_ALIGN;
817                                 }
818                         }
819                 }
820         }
821 }
822
823 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
824 {
825         if (host->bounce_buffer)
826                 return host->bounce_addr;
827         else
828                 return sg_dma_address(host->data->sg);
829 }
830
831 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
832 {
833         if (host->v4_mode) {
834                 sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
835                 if (host->flags & SDHCI_USE_64_BIT_DMA)
836                         sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
837         } else {
838                 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
839         }
840 }
841
842 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
843                                          struct mmc_command *cmd,
844                                          struct mmc_data *data)
845 {
846         unsigned int target_timeout;
847
848         /* timeout in us */
849         if (!data) {
850                 target_timeout = cmd->busy_timeout * 1000;
851         } else {
852                 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
853                 if (host->clock && data->timeout_clks) {
854                         unsigned long long val;
855
856                         /*
857                          * data->timeout_clks is in units of clock cycles.
858                          * host->clock is in Hz.  target_timeout is in us.
859                          * Hence, us = 1000000 * cycles / Hz.  Round up.
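                         * For example (illustrative): timeout_clks = 1000
                         * at host->clock = 50000000 gives
                         * 1000000 * 1000 / 50000000 = 20 us.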
860                          */
861                         val = 1000000ULL * data->timeout_clks;
862                         if (do_div(val, host->clock))
863                                 target_timeout++;
864                         target_timeout += val;
865                 }
866         }
867
868         return target_timeout;
869 }
870
871 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
872                                   struct mmc_command *cmd)
873 {
874         struct mmc_data *data = cmd->data;
875         struct mmc_host *mmc = host->mmc;
876         struct mmc_ios *ios = &mmc->ios;
877         unsigned char bus_width = 1 << ios->bus_width;
878         unsigned int blksz;
879         unsigned int freq;
880         u64 target_timeout;
881         u64 transfer_time;
882
883         target_timeout = sdhci_target_timeout(host, cmd, data);
884         target_timeout *= NSEC_PER_USEC;
885
886         if (data) {
887                 blksz = data->blksz;
888                 freq = host->mmc->actual_clock ? : host->clock;
889                 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
890                 do_div(transfer_time, freq);
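                /*
                 * Illustrative: blksz = 512 on a 4-bit bus at 50 MHz gives
                 * 512 * 2 * NSEC_PER_SEC / 50000000 = 20480 ns per block.
                 */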
891                 /* multiply by '2' to account for any unknowns */
892                 transfer_time = transfer_time * 2;
893                 /* calculate timeout for the entire data */
894                 host->data_timeout = data->blocks * target_timeout +
895                                      transfer_time;
896         } else {
897                 host->data_timeout = target_timeout;
898         }
899
900         if (host->data_timeout)
901                 host->data_timeout += MMC_CMD_TRANSFER_TIME;
902 }
903
904 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
905                              bool *too_big)
906 {
907         u8 count;
908         struct mmc_data *data;
909         unsigned target_timeout, current_timeout;
910
911         *too_big = true;
912
913         /*
914          * If the host controller provides us with an incorrect timeout
915          * value, just skip the check and use 0xE.  The hardware may take
916          * longer to time out, but that's much better than having a too-short
917          * timeout value.
918          */
919         if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
920                 return 0xE;
921
922         /* Unspecified command, assume max */
923         if (cmd == NULL)
924                 return 0xE;
925
926         data = cmd->data;
927         /* Unspecified timeout, assume max */
928         if (!data && !cmd->busy_timeout)
929                 return 0xE;
930
931         /* timeout in us */
932         target_timeout = sdhci_target_timeout(host, cmd, data);
933
934         /*
935          * Figure out needed cycles.
936          * We do this in steps in order to fit inside a 32-bit int.
937          * The first step is the minimum timeout, which will have a
938          * minimum resolution of 6 bits:
939          * (1) 2^13*1000 > 2^22,
940          * (2) host->timeout_clk < 2^16
941          *     =>
942          *     (1) / (2) > 2^6
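         *
         * For example (illustrative, assuming timeout_clk is in kHz as
         * set up by sdhci_add_host()): timeout_clk = 50000 gives a
         * minimum current_timeout of 2^13 * 1000 / 50000 = 163 us; each
         * increment of count doubles it, so count = 14 covers about 2.7 s.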
943          */
944         count = 0;
945         current_timeout = (1 << 13) * 1000 / host->timeout_clk;
946         while (current_timeout < target_timeout) {
947                 count++;
948                 current_timeout <<= 1;
949                 if (count >= 0xF)
950                         break;
951         }
952
953         if (count >= 0xF) {
954                 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
955                         DBG("Too large timeout 0x%x requested for CMD%d!\n",
956                             count, cmd->opcode);
957                 count = 0xE;
958         } else {
959                 *too_big = false;
960         }
961
962         return count;
963 }
964
965 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
966 {
967         u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
968         u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
969
970         if (host->flags & SDHCI_REQ_USE_DMA)
971                 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
972         else
973                 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
974
975         if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
976                 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
977         else
978                 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
979
980         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
981         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
982 }
983
984 static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
985 {
986         if (enable)
987                 host->ier |= SDHCI_INT_DATA_TIMEOUT;
988         else
989                 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
990         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
991         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
992 }
993
994 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
995 {
996         u8 count;
997
998         if (host->ops->set_timeout) {
999                 host->ops->set_timeout(host, cmd);
1000         } else {
1001                 bool too_big = false;
1002
1003                 count = sdhci_calc_timeout(host, cmd, &too_big);
1004
1005                 if (too_big &&
1006                     host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1007                         sdhci_calc_sw_timeout(host, cmd);
1008                         sdhci_set_data_timeout_irq(host, false);
1009                 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1010                         sdhci_set_data_timeout_irq(host, true);
1011                 }
1012
1013                 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1014         }
1015 }
1016
1017 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1018 {
1019         struct mmc_data *data = cmd->data;
1020
1021         host->data_timeout = 0;
1022
1023         if (sdhci_data_line_cmd(cmd))
1024                 sdhci_set_timeout(host, cmd);
1025
1026         if (!data)
1027                 return;
1028
1029         WARN_ON(host->data);
1030
1031         /* Sanity checks */
1032         BUG_ON(data->blksz * data->blocks > 524288);
1033         BUG_ON(data->blksz > host->mmc->max_blk_size);
1034         BUG_ON(data->blocks > 65535);
1035
1036         host->data = data;
1037         host->data_early = 0;
1038         host->data->bytes_xfered = 0;
1039
1040         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1041                 struct scatterlist *sg;
1042                 unsigned int length_mask, offset_mask;
1043                 int i;
1044
1045                 host->flags |= SDHCI_REQ_USE_DMA;
1046
1047                 /*
1048                  * FIXME: This doesn't account for merging when mapping the
1049                  * scatterlist.
1050                  *
1051                  * The assumption here is that alignment and lengths remain
1052                  * the same after DMA mapping to the device address space.
1053                  */
1054                 length_mask = 0;
1055                 offset_mask = 0;
1056                 if (host->flags & SDHCI_USE_ADMA) {
1057                         if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1058                                 length_mask = 3;
1059                                 /*
1060                                  * As we use up to 3 byte chunks to work
1061                                  * around alignment problems, we need to
1062                                  * check the offset as well.
1063                                  */
1064                                 offset_mask = 3;
1065                         }
1066                 } else {
1067                         if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1068                                 length_mask = 3;
1069                         if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1070                                 offset_mask = 3;
1071                 }
1072
1073                 if (unlikely(length_mask | offset_mask)) {
1074                         for_each_sg(data->sg, sg, data->sg_len, i) {
1075                                 if (sg->length & length_mask) {
1076                                         DBG("Reverting to PIO because of transfer size (%d)\n",
1077                                             sg->length);
1078                                         host->flags &= ~SDHCI_REQ_USE_DMA;
1079                                         break;
1080                                 }
1081                                 if (sg->offset & offset_mask) {
1082                                         DBG("Reverting to PIO because of bad alignment\n");
1083                                         host->flags &= ~SDHCI_REQ_USE_DMA;
1084                                         break;
1085                                 }
1086                         }
1087                 }
1088         }
1089
1090         if (host->flags & SDHCI_REQ_USE_DMA) {
1091                 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1092
1093                 if (sg_cnt <= 0) {
1094                         /*
1095                          * This only happens when someone fed
1096                          * us an invalid request.
1097                          */
1098                         WARN_ON(1);
1099                         host->flags &= ~SDHCI_REQ_USE_DMA;
1100                 } else if (host->flags & SDHCI_USE_ADMA) {
1101                         sdhci_adma_table_pre(host, data, sg_cnt);
1102
1103                         sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
1104                         if (host->flags & SDHCI_USE_64_BIT_DMA)
1105                                 sdhci_writel(host,
1106                                              (u64)host->adma_addr >> 32,
1107                                              SDHCI_ADMA_ADDRESS_HI);
1108                 } else {
1109                         WARN_ON(sg_cnt != 1);
1110                         sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1111                 }
1112         }
1113
1114         sdhci_config_dma(host);
1115
1116         if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1117                 int flags;
1118
1119                 flags = SG_MITER_ATOMIC;
1120                 if (host->data->flags & MMC_DATA_READ)
1121                         flags |= SG_MITER_TO_SG;
1122                 else
1123                         flags |= SG_MITER_FROM_SG;
1124                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1125                 host->blocks = data->blocks;
1126         }
1127
1128         sdhci_set_transfer_irqs(host);
1129
1130         /* Set the DMA boundary value and block size */
1131         sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1132                      SDHCI_BLOCK_SIZE);
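        /*
         * sdma_boundary selects the SDMA buffer-boundary interrupt interval
         * in bits 14:12, e.g. (assuming the sdhci.h default) the value
         * SDHCI_DEFAULT_BOUNDARY_ARG corresponds to 512 KiB.
         */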
1133
1134         /*
1135          * From Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block Count
1136          * may be supported; in that case the 16-bit block count register must be 0.
1137          */
1138         if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1139             (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1140                 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1141                         sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1142                 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1143         } else {
1144                 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1145         }
1146 }
1147
1148 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1149                                     struct mmc_request *mrq)
1150 {
1151         return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1152                !mrq->cap_cmd_during_tfr;
1153 }
1154
1155 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1156                                          struct mmc_command *cmd,
1157                                          u16 *mode)
1158 {
1159         bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1160                          (cmd->opcode != SD_IO_RW_EXTENDED);
1161         bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1162         u16 ctrl2;
1163
1164         /*
1165          * For Version 4.10 or later, use of 'Auto CMD Auto Select' is
1166          * recommended rather than 'Auto CMD12 Enable' or 'Auto CMD23
1167          * Enable'.
1168          */
1169         if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1170                 *mode |= SDHCI_TRNS_AUTO_SEL;
1171
1172                 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1173                 if (use_cmd23)
1174                         ctrl2 |= SDHCI_CMD23_ENABLE;
1175                 else
1176                         ctrl2 &= ~SDHCI_CMD23_ENABLE;
1177                 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1178
1179                 return;
1180         }
1181
1182         /*
1183          * If we are sending CMD23, CMD12 never gets sent
1184          * on successful completion (so no Auto-CMD12).
1185          */
1186         if (use_cmd12)
1187                 *mode |= SDHCI_TRNS_AUTO_CMD12;
1188         else if (use_cmd23)
1189                 *mode |= SDHCI_TRNS_AUTO_CMD23;
1190 }
1191
1192 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1193         struct mmc_command *cmd)
1194 {
1195         u16 mode = 0;
1196         struct mmc_data *data = cmd->data;
1197
1198         if (data == NULL) {
1199                 if (host->quirks2 &
1200                         SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1201                         /* must not clear SDHCI_TRANSFER_MODE when tuning */
1202                         if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1203                                 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1204                 } else {
1205                         /* clear Auto CMD settings for no data CMDs */
1206                         mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1207                         sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1208                                 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1209                 }
1210                 return;
1211         }
1212
1213         WARN_ON(!host->data);
1214
1215         if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1216                 mode = SDHCI_TRNS_BLK_CNT_EN;
1217
1218         if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1219                 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1220                 sdhci_auto_cmd_select(host, cmd, &mode);
1221                 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1222                         sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1223         }
1224
1225         if (data->flags & MMC_DATA_READ)
1226                 mode |= SDHCI_TRNS_READ;
1227         if (host->flags & SDHCI_REQ_USE_DMA)
1228                 mode |= SDHCI_TRNS_DMA;
1229
1230         sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1231 }
1232
1233 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1234 {
1235         return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1236                 ((mrq->cmd && mrq->cmd->error) ||
1237                  (mrq->sbc && mrq->sbc->error) ||
1238                  (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1239                  (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1240 }
1241
1242 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1243 {
1244         int i;
1245
1246         if (host->cmd && host->cmd->mrq == mrq)
1247                 host->cmd = NULL;
1248
1249         if (host->data_cmd && host->data_cmd->mrq == mrq)
1250                 host->data_cmd = NULL;
1251
1252         if (host->data && host->data->mrq == mrq)
1253                 host->data = NULL;
1254
1255         if (sdhci_needs_reset(host, mrq))
1256                 host->pending_reset = true;
1257
1258         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1259                 if (host->mrqs_done[i] == mrq) {
1260                         WARN_ON(1);
1261                         return;
1262                 }
1263         }
1264
1265         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1266                 if (!host->mrqs_done[i]) {
1267                         host->mrqs_done[i] = mrq;
1268                         break;
1269                 }
1270         }
1271
1272         WARN_ON(i >= SDHCI_MAX_MRQS);
1273
1274         sdhci_del_timer(host, mrq);
1275
1276         if (!sdhci_has_requests(host))
1277                 sdhci_led_deactivate(host);
1278 }
1279
1280 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1281 {
1282         __sdhci_finish_mrq(host, mrq);
1283
1284         queue_work(host->complete_wq, &host->complete_work);
1285 }
1286
1287 static void sdhci_finish_data(struct sdhci_host *host)
1288 {
1289         struct mmc_command *data_cmd = host->data_cmd;
1290         struct mmc_data *data = host->data;
1291
1292         host->data = NULL;
1293         host->data_cmd = NULL;
1294
1295         /*
1296          * The controller needs a reset of internal state machines upon error
1297          * conditions.
1298          */
1299         if (data->error) {
1300                 if (!host->cmd || host->cmd == data_cmd)
1301                         sdhci_do_reset(host, SDHCI_RESET_CMD);
1302                 sdhci_do_reset(host, SDHCI_RESET_DATA);
1303         }
1304
1305         if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1306             (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1307                 sdhci_adma_table_post(host, data);
1308
1309         /*
1310          * The specification states that the block count register must
1311          * be updated, but it does not specify at what point in the
1312          * data flow. That makes the register entirely useless to read
1313          * back so we have to assume that nothing made it to the card
1314          * in the event of an error.
1315          */
1316         if (data->error)
1317                 data->bytes_xfered = 0;
1318         else
1319                 data->bytes_xfered = data->blksz * data->blocks;
1320
1321         /*
1322          * Need to send CMD12 if -
1323          * a) open-ended multiblock transfer (no CMD23)
1324          * b) error in multiblock transfer
1325          */
1326         if (data->stop &&
1327             (data->error ||
1328              !data->mrq->sbc)) {
1329                 /*
1330                  * 'cap_cmd_during_tfr' request must not use the command line
1331                  * after mmc_command_done() has been called. It is upper layer's
1332                  * responsibility to send the stop command if required.
1333                  */
1334                 if (data->mrq->cap_cmd_during_tfr) {
1335                         __sdhci_finish_mrq(host, data->mrq);
1336                 } else {
1337                         /* Avoid triggering warning in sdhci_send_command() */
1338                         host->cmd = NULL;
1339                         sdhci_send_command(host, data->stop);
1340                 }
1341         } else {
1342                 __sdhci_finish_mrq(host, data->mrq);
1343         }
1344 }
1345
1346 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1347 {
1348         int flags;
1349         u32 mask;
1350         unsigned long timeout;
1351
1352         WARN_ON(host->cmd);
1353
1354         /* Initially, a command has no error */
1355         cmd->error = 0;
1356
1357         if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1358             cmd->opcode == MMC_STOP_TRANSMISSION)
1359                 cmd->flags |= MMC_RSP_BUSY;
1360
1361         /* Wait max 10 ms */
1362         timeout = 10;
1363
1364         mask = SDHCI_CMD_INHIBIT;
1365         if (sdhci_data_line_cmd(cmd))
1366                 mask |= SDHCI_DATA_INHIBIT;
1367
1368         /* We shouldn't wait for data inhibit for stop commands, even
1369            though they might use busy signaling */
1370         if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1371                 mask &= ~SDHCI_DATA_INHIBIT;
1372
1373         while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1374                 if (timeout == 0) {
1375                         pr_err("%s: Controller never released inhibit bit(s).\n",
1376                                mmc_hostname(host->mmc));
1377                         sdhci_dumpregs(host);
1378                         cmd->error = -EIO;
1379                         sdhci_finish_mrq(host, cmd->mrq);
1380                         return;
1381                 }
1382                 timeout--;
1383                 mdelay(1);
1384         }
1385
1386         host->cmd = cmd;
1387         if (sdhci_data_line_cmd(cmd)) {
1388                 WARN_ON(host->data_cmd);
1389                 host->data_cmd = cmd;
1390         }
1391
1392         sdhci_prepare_data(host, cmd);
1393
1394         sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1395
1396         sdhci_set_transfer_mode(host, cmd);
1397
1398         if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1399                 pr_err("%s: Unsupported response type!\n",
1400                         mmc_hostname(host->mmc));
1401                 cmd->error = -EINVAL;
1402                 sdhci_finish_mrq(host, cmd->mrq);
1403                 return;
1404         }
1405
1406         if (!(cmd->flags & MMC_RSP_PRESENT))
1407                 flags = SDHCI_CMD_RESP_NONE;
1408         else if (cmd->flags & MMC_RSP_136)
1409                 flags = SDHCI_CMD_RESP_LONG;
1410         else if (cmd->flags & MMC_RSP_BUSY)
1411                 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1412         else
1413                 flags = SDHCI_CMD_RESP_SHORT;
1414
1415         if (cmd->flags & MMC_RSP_CRC)
1416                 flags |= SDHCI_CMD_CRC;
1417         if (cmd->flags & MMC_RSP_OPCODE)
1418                 flags |= SDHCI_CMD_INDEX;
1419
1420         /* CMD19 is special in that the Data Present Select should be set */
1421         if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1422             cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1423                 flags |= SDHCI_CMD_DATA;
1424
1425         timeout = jiffies;
1426         if (host->data_timeout)
1427                 timeout += nsecs_to_jiffies(host->data_timeout);
1428         else if (!cmd->data && cmd->busy_timeout > 9000)
1429                 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1430         else
1431                 timeout += 10 * HZ;
1432         sdhci_mod_timer(host, cmd->mrq, timeout);
1433
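        /*
         * Writing the command register (index in bits 13:8, response/data
         * flags in the low byte via SDHCI_MAKE_CMD()) starts execution.
         */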
1434         sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1435 }
1436 EXPORT_SYMBOL_GPL(sdhci_send_command);
1437
1438 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1439 {
1440         int i, reg;
1441
1442         for (i = 0; i < 4; i++) {
1443                 reg = SDHCI_RESPONSE + (3 - i) * 4;
1444                 cmd->resp[i] = sdhci_readl(host, reg);
1445         }
1446
1447         if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1448                 return;
1449
1450         /* CRC is stripped so we need to do some shifting */
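        /*
         * Illustrative: resp[0] = 0x00112233, resp[1] = 0x44556677 ->
         * resp[0] becomes 0x11223344, and so on down the array.
         */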
1451         for (i = 0; i < 4; i++) {
1452                 cmd->resp[i] <<= 8;
1453                 if (i != 3)
1454                         cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1455         }
1456 }
1457
1458 static void sdhci_finish_command(struct sdhci_host *host)
1459 {
1460         struct mmc_command *cmd = host->cmd;
1461
1462         host->cmd = NULL;
1463
1464         if (cmd->flags & MMC_RSP_PRESENT) {
1465                 if (cmd->flags & MMC_RSP_136) {
1466                         sdhci_read_rsp_136(host, cmd);
1467                 } else {
1468                         cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1469                 }
1470         }
1471
1472         if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1473                 mmc_command_done(host->mmc, cmd->mrq);
1474
1475         /*
1476          * The host can send an interrupt when the busy state has
1477          * ended, allowing us to wait without wasting CPU cycles.
1478          * The busy signal uses DAT0 so this is similar to waiting
1479          * for data to complete.
1480          *
1481          * Note: The 1.0 specification is a bit ambiguous about this
1482          *       feature so there might be some problems with older
1483          *       controllers.
1484          */
1485         if (cmd->flags & MMC_RSP_BUSY) {
1486                 if (cmd->data) {
1487                         DBG("Cannot wait for busy signal when also doing a data transfer");
1488                 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1489                            cmd == host->data_cmd) {
1490                         /* Command complete before busy is ended */
1491                         return;
1492                 }
1493         }
1494
1495         /* Finished CMD23, now send actual command. */
1496         if (cmd == cmd->mrq->sbc) {
1497                 sdhci_send_command(host, cmd->mrq->cmd);
1498         } else {
1499
1500                 /* Processed actual command. */
1501                 if (host->data && host->data_early)
1502                         sdhci_finish_data(host);
1503
1504                 if (!cmd->data)
1505                         __sdhci_finish_mrq(host, cmd->mrq);
1506         }
1507 }
1508
1509 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1510 {
1511         u16 preset = 0;
1512
1513         switch (host->timing) {
1514         case MMC_TIMING_UHS_SDR12:
1515                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1516                 break;
1517         case MMC_TIMING_UHS_SDR25:
1518                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1519                 break;
1520         case MMC_TIMING_UHS_SDR50:
1521                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1522                 break;
1523         case MMC_TIMING_UHS_SDR104:
1524         case MMC_TIMING_MMC_HS200:
1525                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1526                 break;
1527         case MMC_TIMING_UHS_DDR50:
1528         case MMC_TIMING_MMC_DDR52:
1529                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1530                 break;
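             /*
              * HS400 is an eMMC mode; its preset register is not defined
              * by the SDHCI spec (non-standard, like the HS400 bus-speed
              * select in sdhci_set_uhs_signaling()).
              */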
1531         case MMC_TIMING_MMC_HS400:
1532                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1533                 break;
1534         default:
1535                 pr_warn("%s: Invalid UHS-I mode selected\n",
1536                         mmc_hostname(host->mmc));
1537                 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1538                 break;
1539         }
1540         return preset;
1541 }
1542
1543 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1544                    unsigned int *actual_clock)
1545 {
1546         int div = 0; /* Initialized to silence a compiler warning */
1547         int real_div = div, clk_mul = 1;
1548         u16 clk = 0;
1549         bool switch_base_clk = false;
1550
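             /*
              * Three encodings are handled below: v3.00+ programmable
              * clock mode, SDCLK = (max_clk * clk_mul) / N with the
              * divider field holding N - 1; v3.00+ divided clock mode,
              * SDCLK = max_clk / (2 * field) with field 0 meaning
              * max_clk; and the pre-3.00 8-bit power-of-2 divisor,
              * encoded the same divided way. E.g. a 50 MHz request on a
              * 200 MHz base clock in divided clock mode picks div = 4
              * and programs field = 2.
              */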
1551         if (host->version >= SDHCI_SPEC_300) {
1552                 if (host->preset_enabled) {
1553                         u16 pre_val;
1554
1555                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1556                         pre_val = sdhci_get_preset_value(host);
1557                         div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1558                                 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1559                         if (host->clk_mul &&
1560                                 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1561                                 clk = SDHCI_PROG_CLOCK_MODE;
1562                                 real_div = div + 1;
1563                                 clk_mul = host->clk_mul;
1564                         } else {
1565                                 real_div = max_t(int, 1, div << 1);
1566                         }
1567                         goto clock_set;
1568                 }
1569
1570                 /*
1571                  * Check if the Host Controller supports Programmable Clock
1572                  * Mode.
1573                  */
1574                 if (host->clk_mul) {
1575                         for (div = 1; div <= 1024; div++) {
1576                                 if ((host->max_clk * host->clk_mul / div)
1577                                         <= clock)
1578                                         break;
1579                         }
1580                         if ((host->max_clk * host->clk_mul / div) <= clock) {
1581                                 /*
1582                                  * Set Programmable Clock Mode in the Clock
1583                                  * Control register.
1584                                  */
1585                                 clk = SDHCI_PROG_CLOCK_MODE;
1586                                 real_div = div;
1587                                 clk_mul = host->clk_mul;
1588                                 div--;
1589                         } else {
1590                                 /*
1591                                  * Divisor can be too small to reach clock
1592                                  * speed requirement. Then use the base clock.
1593                                  */
1594                                 switch_base_clk = true;
1595                         }
1596                 }
1597
1598                 if (!host->clk_mul || switch_base_clk) {
1599                         /* Version 3.00 divisors must be a multiple of 2. */
1600                         if (host->max_clk <= clock)
1601                                 div = 1;
1602                         else {
1603                                 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1604                                      div += 2) {
1605                                         if ((host->max_clk / div) <= clock)
1606                                                 break;
1607                                 }
1608                         }
1609                         real_div = div;
1610                         div >>= 1;
1611                         if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1612                                 && !div && host->max_clk <= 25000000)
1613                                 div = 1;
1614                 }
1615         } else {
1616                 /* Version 2.00 divisors must be a power of 2. */
1617                 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1618                         if ((host->max_clk / div) <= clock)
1619                                 break;
1620                 }
1621                 real_div = div;
1622                 div >>= 1;
1623         }
1624
1625 clock_set:
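             /*
              * real_div is the full divide ratio (including the 2x
              * factor in divided clock mode); clk_mul is 1 unless
              * programmable clock mode was selected.
              */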
1626         if (real_div)
1627                 *actual_clock = (host->max_clk * clk_mul) / real_div;
1628         clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1629         clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1630                 << SDHCI_DIVIDER_HI_SHIFT;
1631
1632         return clk;
1633 }
1634 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1635
1636 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1637 {
1638         ktime_t timeout;
1639
1640         clk |= SDHCI_CLOCK_INT_EN;
1641         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1642
1643         /* Wait max 20 ms */
1644         timeout = ktime_add_ms(ktime_get(), 20);
1645         while (1) {
1646                 bool timedout = ktime_after(ktime_get(), timeout);
1647
1648                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1649                 if (clk & SDHCI_CLOCK_INT_STABLE)
1650                         break;
1651                 if (timedout) {
1652                         pr_err("%s: Internal clock never stabilised.\n",
1653                                mmc_hostname(host->mmc));
1654                         sdhci_dumpregs(host);
1655                         return;
1656                 }
1657                 udelay(10);
1658         }
1659
1660         clk |= SDHCI_CLOCK_CARD_EN;
1661         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1662 }
1663 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1664
1665 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1666 {
1667         u16 clk;
1668
1669         host->mmc->actual_clock = 0;
1670
1671         sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1672
1673         if (clock == 0)
1674                 return;
1675
1676         clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1677         sdhci_enable_clk(host, clk);
1678 }
1679 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1680
1681 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1682                                 unsigned short vdd)
1683 {
1684         struct mmc_host *mmc = host->mmc;
1685
1686         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1687
1688         if (mode != MMC_POWER_OFF)
1689                 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1690         else
1691                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1692 }
1693
1694 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1695                            unsigned short vdd)
1696 {
1697         u8 pwr = 0;
1698
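             /*
              * 'vdd' is the bit number of the selected voltage range in
              * the OCR mask, hence the 1 << vdd conversion below.
              */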
1699         if (mode != MMC_POWER_OFF) {
1700                 switch (1 << vdd) {
1701                 case MMC_VDD_165_195:
1702                 /*
1703                  * Without a regulator, SDHCI does not support 2.0v
1704                  * so we only get here if the driver deliberately
1705                  * added the 2.0v range to ocr_avail. Map it to 1.8v
1706                  * for the purpose of turning on the power.
1707                  */
1708                 case MMC_VDD_20_21:
1709                         pwr = SDHCI_POWER_180;
1710                         break;
1711                 case MMC_VDD_29_30:
1712                 case MMC_VDD_30_31:
1713                         pwr = SDHCI_POWER_300;
1714                         break;
1715                 case MMC_VDD_32_33:
1716                 case MMC_VDD_33_34:
1717                         pwr = SDHCI_POWER_330;
1718                         break;
1719                 default:
1720                         WARN(1, "%s: Invalid vdd %#x\n",
1721                              mmc_hostname(host->mmc), vdd);
1722                         break;
1723                 }
1724         }
1725
1726         if (host->pwr == pwr)
1727                 return;
1728
1729         host->pwr = pwr;
1730
1731         if (pwr == 0) {
1732                 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1733                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1734                         sdhci_runtime_pm_bus_off(host);
1735         } else {
1736                 /*
1737                  * Spec says that we should clear the power reg before setting
1738                  * a new value. Some controllers don't seem to like this though.
1739                  */
1740                 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1741                         sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1742
1743                 /*
1744                  * At least the Marvell CaFe chip gets confused if we set the
1745                  * voltage and turn on the power at the same time, so set the
1746                  * voltage first.
1747                  */
1748                 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1749                         sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1750
1751                 pwr |= SDHCI_POWER_ON;
1752
1753                 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1754
1755                 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1756                         sdhci_runtime_pm_bus_on(host);
1757
1758                 /*
1759                  * Some controllers need an extra 10 ms delay before they
1760                  * can apply the clock after applying power.
1761                  */
1762                 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1763                         mdelay(10);
1764         }
1765 }
1766 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1767
1768 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1769                      unsigned short vdd)
1770 {
1771         if (IS_ERR(host->mmc->supply.vmmc))
1772                 sdhci_set_power_noreg(host, mode, vdd);
1773         else
1774                 sdhci_set_power_reg(host, mode, vdd);
1775 }
1776 EXPORT_SYMBOL_GPL(sdhci_set_power);
1777
1778 /*****************************************************************************\
1779  *                                                                           *
1780  * MMC callbacks                                                             *
1781  *                                                                           *
1782 \*****************************************************************************/
1783
1784 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1785 {
1786         struct sdhci_host *host;
1787         int present;
1788         unsigned long flags;
1789
1790         host = mmc_priv(mmc);
1791
1792         /* Firstly check card presence */
1793         present = mmc->ops->get_cd(mmc);
1794
1795         spin_lock_irqsave(&host->lock, flags);
1796
1797         sdhci_led_activate(host);
1798
1799         /*
1800          * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1801          * requests if Auto-CMD12 is enabled.
1802          */
1803         if (sdhci_auto_cmd12(host, mrq)) {
1804                 if (mrq->stop) {
1805                         mrq->data->stop = NULL;
1806                         mrq->stop = NULL;
1807                 }
1808         }
1809
1810         if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1811                 mrq->cmd->error = -ENOMEDIUM;
1812                 sdhci_finish_mrq(host, mrq);
1813         } else {
1814                 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1815                         sdhci_send_command(host, mrq->sbc);
1816                 else
1817                         sdhci_send_command(host, mrq->cmd);
1818         }
1819
1820         spin_unlock_irqrestore(&host->lock, flags);
1821 }
1822 EXPORT_SYMBOL_GPL(sdhci_request);
1823
1824 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1825 {
1826         u8 ctrl;
1827
1828         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1829         if (width == MMC_BUS_WIDTH_8) {
1830                 ctrl &= ~SDHCI_CTRL_4BITBUS;
1831                 ctrl |= SDHCI_CTRL_8BITBUS;
1832         } else {
1833                 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1834                         ctrl &= ~SDHCI_CTRL_8BITBUS;
1835                 if (width == MMC_BUS_WIDTH_4)
1836                         ctrl |= SDHCI_CTRL_4BITBUS;
1837                 else
1838                         ctrl &= ~SDHCI_CTRL_4BITBUS;
1839         }
1840         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1841 }
1842 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1843
1844 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1845 {
1846         u16 ctrl_2;
1847
1848         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1849         /* Select Bus Speed Mode for host */
1850         ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1851         if ((timing == MMC_TIMING_MMC_HS200) ||
1852             (timing == MMC_TIMING_UHS_SDR104))
1853                 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1854         else if (timing == MMC_TIMING_UHS_SDR12)
1855                 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1856         else if (timing == MMC_TIMING_UHS_SDR25)
1857                 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1858         else if (timing == MMC_TIMING_UHS_SDR50)
1859                 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1860         else if ((timing == MMC_TIMING_UHS_DDR50) ||
1861                  (timing == MMC_TIMING_MMC_DDR52))
1862                 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1863         else if (timing == MMC_TIMING_MMC_HS400)
1864                 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1865         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1866 }
1867 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1868
1869 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1870 {
1871         struct sdhci_host *host = mmc_priv(mmc);
1872         u8 ctrl;
1873
1874         if (ios->power_mode == MMC_POWER_UNDEFINED)
1875                 return;
1876
1877         if (host->flags & SDHCI_DEVICE_DEAD) {
1878                 if (!IS_ERR(mmc->supply.vmmc) &&
1879                     ios->power_mode == MMC_POWER_OFF)
1880                         mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1881                 return;
1882         }
1883
1884         /*
1885          * Reset the chip on each power off.
1886          * Should clear out any weird states.
1887          */
1888         if (ios->power_mode == MMC_POWER_OFF) {
1889                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1890                 sdhci_reinit(host);
1891         }
1892
1893         if (host->version >= SDHCI_SPEC_300 &&
1894                 (ios->power_mode == MMC_POWER_UP) &&
1895                 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1896                 sdhci_enable_preset_value(host, false);
1897
1898         if (!ios->clock || ios->clock != host->clock) {
1899                 host->ops->set_clock(host, ios->clock);
1900                 host->clock = ios->clock;
1901
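                     /*
                      * With this quirk the data timeout clock follows
                      * SDCLK, so recompute it (in kHz) and the resulting
                      * maximum busy timeout (in ms) on every clock change.
                      */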
1902                 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1903                     host->clock) {
1904                         host->timeout_clk = host->mmc->actual_clock ?
1905                                                 host->mmc->actual_clock / 1000 :
1906                                                 host->clock / 1000;
1907                         host->mmc->max_busy_timeout =
1908                                 host->ops->get_max_timeout_count ?
1909                                 host->ops->get_max_timeout_count(host) :
1910                                 1 << 27;
1911                         host->mmc->max_busy_timeout /= host->timeout_clk;
1912                 }
1913         }
1914
1915         if (host->ops->set_power)
1916                 host->ops->set_power(host, ios->power_mode, ios->vdd);
1917         else
1918                 sdhci_set_power(host, ios->power_mode, ios->vdd);
1919
1920         if (host->ops->platform_send_init_74_clocks)
1921                 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1922
1923         host->ops->set_bus_width(host, ios->bus_width);
1924
1925         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1926
1927         if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1928                 if (ios->timing == MMC_TIMING_SD_HS ||
1929                      ios->timing == MMC_TIMING_MMC_HS ||
1930                      ios->timing == MMC_TIMING_MMC_HS400 ||
1931                      ios->timing == MMC_TIMING_MMC_HS200 ||
1932                      ios->timing == MMC_TIMING_MMC_DDR52 ||
1933                      ios->timing == MMC_TIMING_UHS_SDR50 ||
1934                      ios->timing == MMC_TIMING_UHS_SDR104 ||
1935                      ios->timing == MMC_TIMING_UHS_DDR50 ||
1936                      ios->timing == MMC_TIMING_UHS_SDR25)
1937                         ctrl |= SDHCI_CTRL_HISPD;
1938                 else
1939                         ctrl &= ~SDHCI_CTRL_HISPD;
1940         }
1941
1942         if (host->version >= SDHCI_SPEC_300) {
1943                 u16 clk, ctrl_2;
1944
1945                 if (!host->preset_enabled) {
1946                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1947                         /*
1948                          * We only need to set Driver Strength if the
1949                          * preset value enable is not set.
1950                          */
1951                         ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1952                         ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1953                         if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1954                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1955                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1956                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1957                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1958                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1959                         else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1960                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1961                         else {
1962                                 pr_warn("%s: invalid driver type, default to driver type B\n",
1963                                         mmc_hostname(mmc));
1964                                 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1965                         }
1966
1967                         sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1968                 } else {
1969                         /*
1970                          * According to SDHC Spec v3.00, if the Preset Value
1971                          * Enable in the Host Control 2 register is set, we
1972                          * need to reset SD Clock Enable before changing High
1973                          * Speed Enable to avoid generating clock glitches.
1974                          */
1975
1976                         /* Reset SD Clock Enable */
1977                         clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1978                         clk &= ~SDHCI_CLOCK_CARD_EN;
1979                         sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1980
1981                         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1982
1983                         /* Re-enable SD Clock */
1984                         host->ops->set_clock(host, host->clock);
1985                 }
1986
1987                 /* Reset SD Clock Enable */
1988                 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1989                 clk &= ~SDHCI_CLOCK_CARD_EN;
1990                 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1991
1992                 host->ops->set_uhs_signaling(host, ios->timing);
1993                 host->timing = ios->timing;
1994
1995                 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1996                                 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1997                                  (ios->timing == MMC_TIMING_UHS_SDR25) ||
1998                                  (ios->timing == MMC_TIMING_UHS_SDR50) ||
1999                                  (ios->timing == MMC_TIMING_UHS_SDR104) ||
2000                                  (ios->timing == MMC_TIMING_UHS_DDR50) ||
2001                                  (ios->timing == MMC_TIMING_MMC_DDR52))) {
2002                         u16 preset;
2003
2004                         sdhci_enable_preset_value(host, true);
2005                         preset = sdhci_get_preset_value(host);
2006                         ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2007                                 >> SDHCI_PRESET_DRV_SHIFT;
2008                 }
2009
2010                 /* Re-enable SD Clock */
2011                 host->ops->set_clock(host, host->clock);
2012         } else
2013                 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2014
2015         /*
2016          * Some (ENE) controllers go apeshit on some ios operations,
2017          * signalling timeout and CRC errors even on CMD0. Resetting
2018          * it on each ios seems to solve the problem.
2019          */
2020         if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2021                 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2022 }
2023 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2024
2025 static int sdhci_get_cd(struct mmc_host *mmc)
2026 {
2027         struct sdhci_host *host = mmc_priv(mmc);
2028         int gpio_cd = mmc_gpio_get_cd(mmc);
2029
2030         if (host->flags & SDHCI_DEVICE_DEAD)
2031                 return 0;
2032
2033         /* If nonremovable, assume that the card is always present. */
2034         if (!mmc_card_is_removable(host->mmc))
2035                 return 1;
2036
2037         /*
2038          * Try slot GPIO detect; if defined, it takes precedence
2039          * over the built-in controller functionality.
2040          */
2041         if (gpio_cd >= 0)
2042                 return !!gpio_cd;
2043
2044         /* If polling, assume that the card is always present. */
2045         if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2046                 return 1;
2047
2048         /* Host native card detect */
2049         return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2050 }
2051
2052 static int sdhci_check_ro(struct sdhci_host *host)
2053 {
2054         unsigned long flags;
2055         int is_readonly;
2056
2057         spin_lock_irqsave(&host->lock, flags);
2058
2059         if (host->flags & SDHCI_DEVICE_DEAD)
2060                 is_readonly = 0;
2061         else if (host->ops->get_ro)
2062                 is_readonly = host->ops->get_ro(host);
2063         else if (mmc_can_gpio_ro(host->mmc))
2064                 is_readonly = mmc_gpio_get_ro(host->mmc);
2065         else
2066                 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2067                                 & SDHCI_WRITE_PROTECT);
2068
2069         spin_unlock_irqrestore(&host->lock, flags);
2070
2071         /* This quirk needs to be replaced by a callback function later */
2072         return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2073                 !is_readonly : is_readonly;
2074 }
2075
2076 #define SAMPLE_COUNT    5
2077
2078 static int sdhci_get_ro(struct mmc_host *mmc)
2079 {
2080         struct sdhci_host *host = mmc_priv(mmc);
2081         int i, ro_count;
2082
2083         if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2084                 return sdhci_check_ro(host);
2085
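             /*
              * Unstable RO detect: take SAMPLE_COUNT readings 30 ms
              * apart and report read-only only if a majority of the
              * samples say so.
              */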
2086         ro_count = 0;
2087         for (i = 0; i < SAMPLE_COUNT; i++) {
2088                 if (sdhci_check_ro(host)) {
2089                         if (++ro_count > SAMPLE_COUNT / 2)
2090                                 return 1;
2091                 }
2092                 msleep(30);
2093         }
2094         return 0;
2095 }
2096
2097 static void sdhci_hw_reset(struct mmc_host *mmc)
2098 {
2099         struct sdhci_host *host = mmc_priv(mmc);
2100
2101         if (host->ops && host->ops->hw_reset)
2102                 host->ops->hw_reset(host);
2103 }
2104
2105 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2106 {
2107         if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2108                 if (enable)
2109                         host->ier |= SDHCI_INT_CARD_INT;
2110                 else
2111                         host->ier &= ~SDHCI_INT_CARD_INT;
2112
2113                 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2114                 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2115         }
2116 }
2117
2118 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2119 {
2120         struct sdhci_host *host = mmc_priv(mmc);
2121         unsigned long flags;
2122
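             /*
              * Hold a runtime PM usage count while SDIO IRQs are
              * enabled, so the controller cannot runtime suspend and
              * miss a card interrupt.
              */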
2123         if (enable)
2124                 pm_runtime_get_noresume(host->mmc->parent);
2125
2126         spin_lock_irqsave(&host->lock, flags);
2127         if (enable)
2128                 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2129         else
2130                 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2131
2132         sdhci_enable_sdio_irq_nolock(host, enable);
2133         spin_unlock_irqrestore(&host->lock, flags);
2134
2135         if (!enable)
2136                 pm_runtime_put_noidle(host->mmc->parent);
2137 }
2138 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2139
2140 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2141                                       struct mmc_ios *ios)
2142 {
2143         struct sdhci_host *host = mmc_priv(mmc);
2144         u16 ctrl;
2145         int ret;
2146
2147         /*
2148          * Signal Voltage Switching is only applicable for Host Controllers
2149          * v3.00 and above.
2150          */
2151         if (host->version < SDHCI_SPEC_300)
2152                 return 0;
2153
2154         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2155
2156         switch (ios->signal_voltage) {
2157         case MMC_SIGNAL_VOLTAGE_330:
2158                 if (!(host->flags & SDHCI_SIGNALING_330))
2159                         return -EINVAL;
2160                 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2161                 ctrl &= ~SDHCI_CTRL_VDD_180;
2162                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2163
2164                 if (!IS_ERR(mmc->supply.vqmmc)) {
2165                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2166                         if (ret) {
2167                                 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2168                                         mmc_hostname(mmc));
2169                                 return -EIO;
2170                         }
2171                 }
2172                 /* Wait for 5ms */
2173                 usleep_range(5000, 5500);
2174
2175                 /* 3.3V regulator output should be stable within 5 ms */
2176                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2177                 if (!(ctrl & SDHCI_CTRL_VDD_180))
2178                         return 0;
2179
2180                 pr_warn("%s: 3.3V regulator output did not become stable\n",
2181                         mmc_hostname(mmc));
2182
2183                 return -EAGAIN;
2184         case MMC_SIGNAL_VOLTAGE_180:
2185                 if (!(host->flags & SDHCI_SIGNALING_180))
2186                         return -EINVAL;
2187                 if (!IS_ERR(mmc->supply.vqmmc)) {
2188                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2189                         if (ret) {
2190                                 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2191                                         mmc_hostname(mmc));
2192                                 return -EIO;
2193                         }
2194                 }
2195
2196                 /*
2197                  * Enable 1.8V Signal Enable in the Host Control2
2198                  * register
2199                  */
2200                 ctrl |= SDHCI_CTRL_VDD_180;
2201                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2202
2203                 /* Some controllers need to do more when switching */
2204                 if (host->ops->voltage_switch)
2205                         host->ops->voltage_switch(host);
2206
2207                 /* 1.8V regulator output should be stable within 5 ms */
2208                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2209                 if (ctrl & SDHCI_CTRL_VDD_180)
2210                         return 0;
2211
2212                 pr_warn("%s: 1.8V regulator output did not become stable\n",
2213                         mmc_hostname(mmc));
2214
2215                 return -EAGAIN;
2216         case MMC_SIGNAL_VOLTAGE_120:
2217                 if (!(host->flags & SDHCI_SIGNALING_120))
2218                         return -EINVAL;
2219                 if (!IS_ERR(mmc->supply.vqmmc)) {
2220                         ret = mmc_regulator_set_vqmmc(mmc, ios);
2221                         if (ret) {
2222                                 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2223                                         mmc_hostname(mmc));
2224                                 return -EIO;
2225                         }
2226                 }
2227                 return 0;
2228         default:
2229                 /* No signal voltage switch required */
2230                 return 0;
2231         }
2232 }
2233 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2234
2235 static int sdhci_card_busy(struct mmc_host *mmc)
2236 {
2237         struct sdhci_host *host = mmc_priv(mmc);
2238         u32 present_state;
2239
2240         /* Check whether DAT[0] is 0 */
2241         present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2242
2243         return !(present_state & SDHCI_DATA_0_LVL_MASK);
2244 }
2245
2246 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2247 {
2248         struct sdhci_host *host = mmc_priv(mmc);
2249         unsigned long flags;
2250
2251         spin_lock_irqsave(&host->lock, flags);
2252         host->flags |= SDHCI_HS400_TUNING;
2253         spin_unlock_irqrestore(&host->lock, flags);
2254
2255         return 0;
2256 }
2257
2258 void sdhci_start_tuning(struct sdhci_host *host)
2259 {
2260         u16 ctrl;
2261
2262         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2263         ctrl |= SDHCI_CTRL_EXEC_TUNING;
2264         if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2265                 ctrl |= SDHCI_CTRL_TUNED_CLK;
2266         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2267
2268         /*
2269          * As per the Host Controller spec v3.00, tuning command
2270          * generates Buffer Read Ready interrupt, so enable that.
2271          *
2272          * Note: The spec clearly says that when the tuning sequence
2273          * is being performed, the controller does not generate
2274          * interrupts other than Buffer Read Ready interrupt. But
2275          * to make sure we don't hit a controller bug, we _only_
2276          * enable Buffer Read Ready interrupt here.
2277          */
2278         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2279         sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2280 }
2281 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2282
2283 void sdhci_end_tuning(struct sdhci_host *host)
2284 {
2285         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2286         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2287 }
2288 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2289
2290 void sdhci_reset_tuning(struct sdhci_host *host)
2291 {
2292         u16 ctrl;
2293
2294         ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2295         ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2296         ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2297         sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2298 }
2299 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2300
2301 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2302 {
2303         sdhci_reset_tuning(host);
2304
2305         sdhci_do_reset(host, SDHCI_RESET_CMD);
2306         sdhci_do_reset(host, SDHCI_RESET_DATA);
2307
2308         sdhci_end_tuning(host);
2309
2310         mmc_abort_tuning(host->mmc, opcode);
2311 }
2312
2313 /*
2314  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2315  * tuning command does not have a data payload (or rather the hardware does it
2316  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2317  * interrupt setup is different to other commands and there is no timeout
2318  * interrupt so special handling is needed.
2319  */
2320 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2321 {
2322         struct mmc_host *mmc = host->mmc;
2323         struct mmc_command cmd = {};
2324         struct mmc_request mrq = {};
2325         unsigned long flags;
2326         u32 b = host->sdma_boundary;
2327
2328         spin_lock_irqsave(&host->lock, flags);
2329
2330         cmd.opcode = opcode;
2331         cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2332         cmd.mrq = &mrq;
2333
2334         mrq.cmd = &cmd;
2335         /*
2336          * In response to CMD19, the card sends a 64-byte tuning
2337          * block to the Host Controller, so set the block size to 64
2338          * (CMD21 on an 8-bit bus uses a 128-byte block instead).
2339          */
2340         if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2341             mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2342                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2343         else
2344                 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2345
2346         /*
2347          * The tuning block is sent by the card to the host controller.
2348          * So we set the TRNS_READ bit in the Transfer Mode register.
2349          * This also takes care of setting DMA Enable and Multi Block
2350          * Select in the same register to 0.
2351          */
2352         sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2353
2354         sdhci_send_command(host, &cmd);
2355
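             /*
              * Tuning completion is signalled only by the Buffer Read
              * Ready interrupt, so drop the normal command bookkeeping
              * and software timer; the wait below has its own timeout.
              */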
2356         host->cmd = NULL;
2357
2358         sdhci_del_timer(host, &mrq);
2359
2360         host->tuning_done = 0;
2361
2362         spin_unlock_irqrestore(&host->lock, flags);
2363
2364         /* Wait for Buffer Read Ready interrupt */
2365         wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2366                            msecs_to_jiffies(50));
2367
2368 }
2369 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2370
2371 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2372 {
2373         int i;
2374
2375         /*
2376          * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2377          * or the number of loops reaches the tuning loop count.
2378          */
2379         for (i = 0; i < host->tuning_loop_count; i++) {
2380                 u16 ctrl;
2381
2382                 sdhci_send_tuning(host, opcode);
2383
2384                 if (!host->tuning_done) {
2385                         pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2386                                 mmc_hostname(host->mmc));
2387                         sdhci_abort_tuning(host, opcode);
2388                         return -ETIMEDOUT;
2389                 }
2390
2391                 /* Spec does not require a delay between tuning cycles */
2392                 if (host->tuning_delay > 0)
2393                         mdelay(host->tuning_delay);
2394
2395                 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2396                 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2397                         if (ctrl & SDHCI_CTRL_TUNED_CLK)
2398                                 return 0; /* Success! */
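                             /*
                              * Execute Tuning cleared without Tuned Clock
                              * being set: the controller gave up, so fall
                              * through to the failure path below.
                              */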
2399                         break;
2400                 }
2401
2402         }
2403
2404         pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2405                 mmc_hostname(host->mmc));
2406         sdhci_reset_tuning(host);
2407         return -EAGAIN;
2408 }
2409
2410 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2411 {
2412         struct sdhci_host *host = mmc_priv(mmc);
2413         int err = 0;
2414         unsigned int tuning_count = 0;
2415         bool hs400_tuning;
2416
2417         hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2418
2419         if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2420                 tuning_count = host->tuning_count;
2421
2422         /*
2423          * The Host Controller needs tuning in case of SDR104 and DDR50
2424          * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2425          * the Capabilities register.
2426          * If the Host Controller supports the HS200 mode then the
2427          * tuning function has to be executed.
2428          */
2429         switch (host->timing) {
2430         /* HS400 tuning is done in HS200 mode */
2431         case MMC_TIMING_MMC_HS400:
2432                 err = -EINVAL;
2433                 goto out;
2434
2435         case MMC_TIMING_MMC_HS200:
2436                 /*
2437                  * Periodic re-tuning for HS400 is not expected to be needed, so
2438                  * disable it here.
2439                  */
2440                 if (hs400_tuning)
2441                         tuning_count = 0;
2442                 break;
2443
2444         case MMC_TIMING_UHS_SDR104:
2445         case MMC_TIMING_UHS_DDR50:
2446                 break;
2447
2448         case MMC_TIMING_UHS_SDR50:
2449                 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2450                         break;
2451                 /* FALLTHROUGH */
2452
2453         default:
2454                 goto out;
2455         }
2456
2457         if (host->ops->platform_execute_tuning) {
2458                 err = host->ops->platform_execute_tuning(host, opcode);
2459                 goto out;
2460         }
2461
2462         host->mmc->retune_period = tuning_count;
2463
2464         if (host->tuning_delay < 0)
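             /*
              * A negative tuning_delay means "use the default": a 1 ms
              * delay between loops for SD (CMD19) tuning and no delay
              * otherwise.
              */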
2465                 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2466
2467         sdhci_start_tuning(host);
2468
2469         host->tuning_err = __sdhci_execute_tuning(host, opcode);
2470
2471         sdhci_end_tuning(host);
2472 out:
2473         host->flags &= ~SDHCI_HS400_TUNING;
2474
2475         return err;
2476 }
2477 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2478
2479 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2480 {
2481         /* Host Controller v3.00 defines preset value registers */
2482         if (host->version < SDHCI_SPEC_300)
2483                 return;
2484
2485         /*
2486          * Only enable or disable Preset Value if it is not already in the
2487          * requested state; otherwise, bail out.
2488          */
2489         if (host->preset_enabled != enable) {
2490                 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2491
2492                 if (enable)
2493                         ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2494                 else
2495                         ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2496
2497                 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2498
2499                 if (enable)
2500                         host->flags |= SDHCI_PV_ENABLED;
2501                 else
2502                         host->flags &= ~SDHCI_PV_ENABLED;
2503
2504                 host->preset_enabled = enable;
2505         }
2506 }
2507
2508 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2509                                 int err)
2510 {
2511         struct sdhci_host *host = mmc_priv(mmc);
2512         struct mmc_data *data = mrq->data;
2513
2514         if (data->host_cookie != COOKIE_UNMAPPED)
2515                 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2516                              mmc_get_dma_dir(data));
2517
2518         data->host_cookie = COOKIE_UNMAPPED;
2519 }
2520
2521 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2522 {
2523         struct sdhci_host *host = mmc_priv(mmc);
2524
2525         mrq->data->host_cookie = COOKIE_UNMAPPED;
2526
2527         /*
2528          * No pre-mapping in the pre hook if we're using the bounce buffer,
2529          * for that we would need two bounce buffers since one buffer is
2530          * in flight when this is getting called.
2531          */
2532         if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2533                 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2534 }
2535
2536 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2537 {
2538         if (host->data_cmd) {
2539                 host->data_cmd->error = err;
2540                 sdhci_finish_mrq(host, host->data_cmd->mrq);
2541         }
2542
2543         if (host->cmd) {
2544                 host->cmd->error = err;
2545                 sdhci_finish_mrq(host, host->cmd->mrq);
2546         }
2547 }
2548
2549 static void sdhci_card_event(struct mmc_host *mmc)
2550 {
2551         struct sdhci_host *host = mmc_priv(mmc);
2552         unsigned long flags;
2553         int present;
2554
2555         /* First check if client has provided their own card event */
2556         if (host->ops->card_event)
2557                 host->ops->card_event(host);
2558
2559         present = mmc->ops->get_cd(mmc);
2560
2561         spin_lock_irqsave(&host->lock, flags);
2562
2563         /* Check sdhci_has_requests() first in case we are runtime suspended */
2564         if (sdhci_has_requests(host) && !present) {
2565                 pr_err("%s: Card removed during transfer!\n",
2566                         mmc_hostname(host->mmc));
2567                 pr_err("%s: Resetting controller.\n",
2568                         mmc_hostname(host->mmc));
2569
2570                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2571                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2572
2573                 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2574         }
2575
2576         spin_unlock_irqrestore(&host->lock, flags);
2577 }
2578
2579 static const struct mmc_host_ops sdhci_ops = {
2580         .request        = sdhci_request,
2581         .post_req       = sdhci_post_req,
2582         .pre_req        = sdhci_pre_req,
2583         .set_ios        = sdhci_set_ios,
2584         .get_cd         = sdhci_get_cd,
2585         .get_ro         = sdhci_get_ro,
2586         .hw_reset       = sdhci_hw_reset,
2587         .enable_sdio_irq = sdhci_enable_sdio_irq,
2588         .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
2589         .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
2590         .execute_tuning                 = sdhci_execute_tuning,
2591         .card_event                     = sdhci_card_event,
2592         .card_busy      = sdhci_card_busy,
2593 };
2594
2595 /*****************************************************************************\
2596  *                                                                           *
2597  * Request done                                                              *
2598  *                                                                           *
2599 \*****************************************************************************/
2600
2601 static bool sdhci_request_done(struct sdhci_host *host)
2602 {
2603         unsigned long flags;
2604         struct mmc_request *mrq;
2605         int i;
2606
2607         spin_lock_irqsave(&host->lock, flags);
2608
2609         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2610                 mrq = host->mrqs_done[i];
2611                 if (mrq)
2612                         break;
2613         }
2614
2615         if (!mrq) {
2616                 spin_unlock_irqrestore(&host->lock, flags);
2617                 return true;
2618         }
2619
2620         /*
2621          * Always unmap the data buffers if they were mapped by
2622          * sdhci_prepare_data() whenever we finish with a request.
2623          * This avoids leaking DMA mappings on error.
2624          */
2625         if (host->flags & SDHCI_REQ_USE_DMA) {
2626                 struct mmc_data *data = mrq->data;
2627
2628                 if (data && data->host_cookie == COOKIE_MAPPED) {
2629                         if (host->bounce_buffer) {
2630                                 /*
2631                                  * On reads, copy the bounced data into the
2632                                  * sglist
2633                                  */
2634                                 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2635                                         unsigned int length = data->bytes_xfered;
2636
2637                                         if (length > host->bounce_buffer_size) {
2638                                                 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2639                                                        mmc_hostname(host->mmc),
2640                                                        host->bounce_buffer_size,
2641                                                        data->bytes_xfered);
2642                                                 /* Cap it down and continue */
2643                                                 length = host->bounce_buffer_size;
2644                                         }
2645                                         dma_sync_single_for_cpu(
2646                                                 host->mmc->parent,
2647                                                 host->bounce_addr,
2648                                                 host->bounce_buffer_size,
2649                                                 DMA_FROM_DEVICE);
2650                                         sg_copy_from_buffer(data->sg,
2651                                                 data->sg_len,
2652                                                 host->bounce_buffer,
2653                                                 length);
2654                                 } else {
2655                                         /* No copying, just switch ownership */
2656                                         dma_sync_single_for_cpu(
2657                                                 host->mmc->parent,
2658                                                 host->bounce_addr,
2659                                                 host->bounce_buffer_size,
2660                                                 mmc_get_dma_dir(data));
2661                                 }
2662                         } else {
2663                                 /* Unmap the raw data */
2664                                 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2665                                              data->sg_len,
2666                                              mmc_get_dma_dir(data));
2667                         }
2668                         data->host_cookie = COOKIE_UNMAPPED;
2669                 }
2670         }
2671
2672         /*
2673          * The controller needs a reset of internal state machines
2674          * upon error conditions.
2675          */
2676         if (sdhci_needs_reset(host, mrq)) {
2677                 /*
2678                  * Do not finish until command and data lines are available for
2679                  * reset. Note there can only be one other mrq, so it cannot
2680                  * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2681                  * would both be null.
2682                  */
2683                 if (host->cmd || host->data_cmd) {
2684                         spin_unlock_irqrestore(&host->lock, flags);
2685                         return true;
2686                 }
2687
2688                 /* Some controllers need this kick or reset won't work here */
2689                 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2690                         /* This is to force an update */
2691                         host->ops->set_clock(host, host->clock);
2692
2693                 /* Spec says we should do both at the same time, but Ricoh
2694                    controllers do not like that. */
2695                 sdhci_do_reset(host, SDHCI_RESET_CMD);
2696                 sdhci_do_reset(host, SDHCI_RESET_DATA);
2697
2698                 host->pending_reset = false;
2699         }
2700
2701         host->mrqs_done[i] = NULL;
2702
2703         spin_unlock_irqrestore(&host->lock, flags);
2704
2705         mmc_request_done(host->mmc, mrq);
2706
2707         return false;
2708 }
2709
2710 static void sdhci_complete_work(struct work_struct *work)
2711 {
2712         struct sdhci_host *host = container_of(work, struct sdhci_host,
2713                                                complete_work);
2714
2715         while (!sdhci_request_done(host))
2716                 ;
2717 }
2718
2719 static void sdhci_timeout_timer(struct timer_list *t)
2720 {
2721         struct sdhci_host *host;
2722         unsigned long flags;
2723
2724         host = from_timer(host, t, timer);
2725
2726         spin_lock_irqsave(&host->lock, flags);
2727
2728         if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2729                 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2730                        mmc_hostname(host->mmc));
2731                 sdhci_dumpregs(host);
2732
2733                 host->cmd->error = -ETIMEDOUT;
2734                 sdhci_finish_mrq(host, host->cmd->mrq);
2735         }
2736
2737         spin_unlock_irqrestore(&host->lock, flags);
2738 }
2739
2740 static void sdhci_timeout_data_timer(struct timer_list *t)
2741 {
2742         struct sdhci_host *host;
2743         unsigned long flags;
2744
2745         host = from_timer(host, t, data_timer);
2746
2747         spin_lock_irqsave(&host->lock, flags);
2748
2749         if (host->data || host->data_cmd ||
2750             (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2751                 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2752                        mmc_hostname(host->mmc));
2753                 sdhci_dumpregs(host);
2754
2755                 if (host->data) {
2756                         host->data->error = -ETIMEDOUT;
2757                         sdhci_finish_data(host);
2758                         queue_work(host->complete_wq, &host->complete_work);
2759                 } else if (host->data_cmd) {
2760                         host->data_cmd->error = -ETIMEDOUT;
2761                         sdhci_finish_mrq(host, host->data_cmd->mrq);
2762                 } else {
2763                         host->cmd->error = -ETIMEDOUT;
2764                         sdhci_finish_mrq(host, host->cmd->mrq);
2765                 }
2766         }
2767
2768         spin_unlock_irqrestore(&host->lock, flags);
2769 }
2770
2771 /*****************************************************************************\
2772  *                                                                           *
2773  * Interrupt handling                                                        *
2774  *                                                                           *
2775 \*****************************************************************************/
2776
2777 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2778 {
2779         /* Handle auto-CMD12 error */
2780         if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2781                 struct mmc_request *mrq = host->data_cmd->mrq;
2782                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2783                 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2784                                    SDHCI_INT_DATA_TIMEOUT :
2785                                    SDHCI_INT_DATA_CRC;
2786
2787                 /* Treat auto-CMD12 error the same as data error */
2788                 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2789                         *intmask_p |= data_err_bit;
2790                         return;
2791                 }
2792         }
2793
2794         if (!host->cmd) {
2795                 /*
2796                  * SDHCI recovers from errors by resetting the cmd and data
2797                  * circuits.  Until that is done, there very well might be more
2798                  * interrupts, so ignore them in that case.
2799                  */
2800                 if (host->pending_reset)
2801                         return;
2802                 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2803                        mmc_hostname(host->mmc), (unsigned)intmask);
2804                 sdhci_dumpregs(host);
2805                 return;
2806         }
2807
2808         if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2809                        SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2810                 if (intmask & SDHCI_INT_TIMEOUT)
2811                         host->cmd->error = -ETIMEDOUT;
2812                 else
2813                         host->cmd->error = -EILSEQ;
2814
2815                 /* Treat data command CRC error the same as data CRC error */
2816                 if (host->cmd->data &&
2817                     (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2818                      SDHCI_INT_CRC) {
2819                         host->cmd = NULL;
2820                         *intmask_p |= SDHCI_INT_DATA_CRC;
2821                         return;
2822                 }
2823
2824                 __sdhci_finish_mrq(host, host->cmd->mrq);
2825                 return;
2826         }
2827
2828         /* Handle auto-CMD23 error */
2829         if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2830                 struct mmc_request *mrq = host->cmd->mrq;
2831                 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2832                 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2833                           -ETIMEDOUT :
2834                           -EILSEQ;
2835
2836                 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2837                         mrq->sbc->error = err;
2838                         __sdhci_finish_mrq(host, mrq);
2839                         return;
2840                 }
2841         }
2842
2843         if (intmask & SDHCI_INT_RESPONSE)
2844                 sdhci_finish_command(host);
2845 }
2846
2847 static void sdhci_adma_show_error(struct sdhci_host *host)
2848 {
2849         void *desc = host->adma_table;
2850
2851         sdhci_dumpregs(host);
2852
2853         while (true) {
2854                 struct sdhci_adma2_64_desc *dma_desc = desc;
2855
2856                 if (host->flags & SDHCI_USE_64_BIT_DMA)
2857                         DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2858                             desc, le32_to_cpu(dma_desc->addr_hi),
2859                             le32_to_cpu(dma_desc->addr_lo),
2860                             le16_to_cpu(dma_desc->len),
2861                             le16_to_cpu(dma_desc->cmd));
2862                 else
2863                         DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2864                             desc, le32_to_cpu(dma_desc->addr_lo),
2865                             le16_to_cpu(dma_desc->len),
2866                             le16_to_cpu(dma_desc->cmd));
2867
2868                 desc += host->desc_sz;
2869
2870                 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2871                         break;
2872         }
2873 }
2874
2875 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2876 {
2877         u32 command;
2878
2879         /* CMD19 generates _only_ Buffer Read Ready interrupt */
2880         if (intmask & SDHCI_INT_DATA_AVAIL) {
2881                 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2882                 if (command == MMC_SEND_TUNING_BLOCK ||
2883                     command == MMC_SEND_TUNING_BLOCK_HS200) {
2884                         host->tuning_done = 1;
2885                         wake_up(&host->buf_ready_int);
2886                         return;
2887                 }
2888         }
2889
2890         if (!host->data) {
2891                 struct mmc_command *data_cmd = host->data_cmd;
2892
2893                 /*
2894                  * The "data complete" interrupt is also used to
2895                  * indicate that a busy state has ended. See comment
2896                  * above in sdhci_cmd_irq().
2897                  */
2898                 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2899                         if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2900                                 host->data_cmd = NULL;
2901                                 data_cmd->error = -ETIMEDOUT;
2902                                 __sdhci_finish_mrq(host, data_cmd->mrq);
2903                                 return;
2904                         }
2905                         if (intmask & SDHCI_INT_DATA_END) {
2906                                 host->data_cmd = NULL;
2907                                 /*
2908                                  * Some cards handle busy-end interrupt
2909                                  * before the command completed, so make
2910                                  * sure we do things in the proper order.
2911                                  */
2912                                 if (host->cmd == data_cmd)
2913                                         return;
2914
2915                                 __sdhci_finish_mrq(host, data_cmd->mrq);
2916                                 return;
2917                         }
2918                 }
2919
2920                 /*
2921                  * SDHCI recovers from errors by resetting the cmd and data
2922                  * circuits. Until that is done, there very well might be more
2923                  * interrupts, so ignore them in that case.
2924                  */
2925                 if (host->pending_reset)
2926                         return;
2927
2928                 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2929                        mmc_hostname(host->mmc), (unsigned)intmask);
2930                 sdhci_dumpregs(host);
2931
2932                 return;
2933         }
2934
2935         if (intmask & SDHCI_INT_DATA_TIMEOUT)
2936                 host->data->error = -ETIMEDOUT;
2937         else if (intmask & SDHCI_INT_DATA_END_BIT)
2938                 host->data->error = -EILSEQ;
2939         else if ((intmask & SDHCI_INT_DATA_CRC) &&
2940                 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2941                         != MMC_BUS_TEST_R)
2942                 host->data->error = -EILSEQ;
2943         else if (intmask & SDHCI_INT_ADMA_ERROR) {
2944                 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2945                 sdhci_adma_show_error(host);
2946                 host->data->error = -EIO;
2947                 if (host->ops->adma_workaround)
2948                         host->ops->adma_workaround(host, intmask);
2949         }
2950
2951         if (host->data->error)
2952                 sdhci_finish_data(host);
2953         else {
2954                 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2955                         sdhci_transfer_pio(host);
2956
2957                 /*
2958                  * We currently don't do anything fancy with DMA
2959                  * boundaries, but as we can't disable the feature
2960                  * we need to at least restart the transfer.
2961                  *
2962                  * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2963                  * should return a valid address to continue from, but as
2964                  * some controllers are faulty, don't trust them.
2965                  */
2966                 if (intmask & SDHCI_INT_DMA_END) {
2967                         dma_addr_t dmastart, dmanow;
2968
2969                         dmastart = sdhci_sdma_address(host);
2970                         dmanow = dmastart + host->data->bytes_xfered;
2971                         /*
2972                          * Force update to the next DMA block boundary.
2973                          */
2974                         dmanow = (dmanow &
2975                                 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2976                                 SDHCI_DEFAULT_BOUNDARY_SIZE;
2977                         host->data->bytes_xfered = dmanow - dmastart;
2978                         DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
2979                             &dmastart, host->data->bytes_xfered, &dmanow);
2980                         sdhci_set_sdma_addr(host, dmanow);
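                        /*
                         * Worked example of the rounding above, assuming the
                         * default 512 KiB boundary: dmastart 0x10000000 plus
                         * 0x2345 bytes transferred gives dmanow 0x10002345,
                         * which rounds up to 0x10080000, the start of the
                         * next 512 KiB block, so bytes_xfered becomes 0x80000.
                         */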
2981                 }
2982
2983                 if (intmask & SDHCI_INT_DATA_END) {
2984                         if (host->cmd == host->data_cmd) {
2985                                 /*
2986                                  * Data managed to finish before the
2987                                  * command completed. Make sure we do
2988                                  * things in the proper order.
2989                                  */
2990                                 host->data_early = 1;
2991                         } else {
2992                                 sdhci_finish_data(host);
2993                         }
2994                 }
2995         }
2996 }
2997
2998 static inline bool sdhci_defer_done(struct sdhci_host *host,
2999                                     struct mmc_request *mrq)
3000 {
3001         struct mmc_data *data = mrq->data;
3002
3003         return host->pending_reset ||
3004                ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3005                 data->host_cookie == COOKIE_MAPPED);
3006 }
3007
3008 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3009 {
3010         struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3011         irqreturn_t result = IRQ_NONE;
3012         struct sdhci_host *host = dev_id;
3013         u32 intmask, mask, unexpected = 0;
3014         int max_loops = 16;
3015         int i;
3016
3017         spin_lock(&host->lock);
3018
3019         if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
3020                 spin_unlock(&host->lock);
3021                 return IRQ_NONE;
3022         }
3023
3024         intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3025         if (!intmask || intmask == 0xffffffff) {
3026                 result = IRQ_NONE;
3027                 goto out;
3028         }
3029
3030         do {
3031                 DBG("IRQ status 0x%08x\n", intmask);
3032
3033                 if (host->ops->irq) {
3034                         intmask = host->ops->irq(host, intmask);
3035                         if (!intmask)
3036                                 goto cont;
3037                 }
3038
3039                 /* Clear selected interrupts. */
3040                 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3041                                   SDHCI_INT_BUS_POWER);
3042                 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3043
3044                 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3045                         u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3046                                       SDHCI_CARD_PRESENT;
3047
3048                         /*
3049                          * There is an observation on i.mx esdhc: the
3050                          * INSERT bit is set again immediately after being
3051                          * cleared if a card is inserted, so the irq must
3052                          * be masked to prevent an interrupt storm that
3053                          * would freeze the system.  The REMOVE bit behaves
3054                          * the same way.
3055                          *
3056                          * More testing is needed here to ensure this
3057                          * works for other platforms though.
3058                          */
3059                         host->ier &= ~(SDHCI_INT_CARD_INSERT |
3060                                        SDHCI_INT_CARD_REMOVE);
3061                         host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3062                                                SDHCI_INT_CARD_INSERT;
3063                         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3064                         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3065
3066                         sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3067                                      SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3068
3069                         host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3070                                                        SDHCI_INT_CARD_REMOVE);
3071                         result = IRQ_WAKE_THREAD;
3072                 }
3073
3074                 if (intmask & SDHCI_INT_CMD_MASK)
3075                         sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3076
3077                 if (intmask & SDHCI_INT_DATA_MASK)
3078                         sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3079
3080                 if (intmask & SDHCI_INT_BUS_POWER)
3081                         pr_err("%s: Card is consuming too much power!\n",
3082                                 mmc_hostname(host->mmc));
3083
3084                 if (intmask & SDHCI_INT_RETUNE)
3085                         mmc_retune_needed(host->mmc);
3086
3087                 if ((intmask & SDHCI_INT_CARD_INT) &&
3088                     (host->ier & SDHCI_INT_CARD_INT)) {
3089                         sdhci_enable_sdio_irq_nolock(host, false);
3090                         host->thread_isr |= SDHCI_INT_CARD_INT;
3091                         result = IRQ_WAKE_THREAD;
3092                 }
3093
3094                 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3095                              SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3096                              SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3097                              SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3098
3099                 if (intmask) {
3100                         unexpected |= intmask;
3101                         sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3102                 }
3103 cont:
3104                 if (result == IRQ_NONE)
3105                         result = IRQ_HANDLED;
3106
3107                 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3108         } while (intmask && --max_loops);
3109
3110         /* Determine if mrqs can be completed immediately */
3111         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3112                 struct mmc_request *mrq = host->mrqs_done[i];
3113
3114                 if (!mrq)
3115                         continue;
3116
3117                 if (sdhci_defer_done(host, mrq)) {
3118                         result = IRQ_WAKE_THREAD;
3119                 } else {
3120                         mrqs_done[i] = mrq;
3121                         host->mrqs_done[i] = NULL;
3122                 }
3123         }
3124 out:
3125         spin_unlock(&host->lock);
3126
3127         /* Process mrqs ready for immediate completion */
3128         for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3129                 if (mrqs_done[i])
3130                         mmc_request_done(host->mmc, mrqs_done[i]);
3131         }
3132
3133         if (unexpected) {
3134                 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3135                            mmc_hostname(host->mmc), unexpected);
3136                 sdhci_dumpregs(host);
3137         }
3138
3139         return result;
3140 }
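
/*
 * A minimal sketch of the ->irq hook consumed at the top of the handler
 * loop above, showing how a glue driver can filter status bits before
 * the generic code acts on them.  The function name and the vendor bit
 * are hypothetical, for illustration only.
 */
static u32 __maybe_unused sdhci_foo_irq(struct sdhci_host *host, u32 intmask)
{
        u32 vendor = intmask & BIT(28); /* assumed vendor-specific bit */

        /* Acknowledge the hypothetical vendor interrupt privately... */
        if (vendor)
                sdhci_writel(host, vendor, SDHCI_INT_STATUS);

        /* ...and return only the bits the generic handler should see. */
        return intmask & ~vendor;
}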
3141
3142 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3143 {
3144         struct sdhci_host *host = dev_id;
3145         unsigned long flags;
3146         u32 isr;
3147
3148         while (!sdhci_request_done(host))
3149                 ;
3150
3151         spin_lock_irqsave(&host->lock, flags);
3152         isr = host->thread_isr;
3153         host->thread_isr = 0;
3154         spin_unlock_irqrestore(&host->lock, flags);
3155
3156         if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3157                 struct mmc_host *mmc = host->mmc;
3158
3159                 mmc->ops->card_event(mmc);
3160                 mmc_detect_change(mmc, msecs_to_jiffies(200));
3161         }
3162
3163         if (isr & SDHCI_INT_CARD_INT) {
3164                 sdio_run_irqs(host->mmc);
3165
3166                 spin_lock_irqsave(&host->lock, flags);
3167                 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3168                         sdhci_enable_sdio_irq_nolock(host, true);
3169                 spin_unlock_irqrestore(&host->lock, flags);
3170         }
3171
3172         return IRQ_HANDLED;
3173 }
3174
3175 /*****************************************************************************\
3176  *                                                                           *
3177  * Suspend/resume                                                            *
3178  *                                                                           *
3179 \*****************************************************************************/
3180
3181 #ifdef CONFIG_PM
3182
3183 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3184 {
3185         return mmc_card_is_removable(host->mmc) &&
3186                !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3187                !mmc_can_gpio_cd(host->mmc);
3188 }
3189
3190 /*
3191  * To enable wakeup events, the corresponding events have to be enabled in
3192  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3193  * Table' in the SD Host Controller Standard Specification.
3194  * It is useless to restore SDHCI_INT_ENABLE state in
3195  * sdhci_disable_irq_wakeups() since it will be set by
3196  * sdhci_enable_card_detection() or sdhci_init().
3197  */
3198 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3199 {
3200         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3201                   SDHCI_WAKE_ON_INT;
3202         u32 irq_val = 0;
3203         u8 wake_val = 0;
3204         u8 val;
3205
3206         if (sdhci_cd_irq_can_wakeup(host)) {
3207                 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3208                 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3209         }
3210
3211         if (mmc_card_wake_sdio_irq(host->mmc)) {
3212                 wake_val |= SDHCI_WAKE_ON_INT;
3213                 irq_val |= SDHCI_INT_CARD_INT;
3214         }
3215
3216         if (!irq_val)
3217                 return false;
3218
3219         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3220         val &= ~mask;
3221         val |= wake_val;
3222         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3223
3224         sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3225
3226         host->irq_wake_enabled = !enable_irq_wake(host->irq);
3227
3228         return host->irq_wake_enabled;
3229 }
3230
3231 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3232 {
3233         u8 val;
3234         u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3235                         | SDHCI_WAKE_ON_INT;
3236
3237         val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3238         val &= ~mask;
3239         sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3240
3241         disable_irq_wake(host->irq);
3242
3243         host->irq_wake_enabled = false;
3244 }
3245
3246 int sdhci_suspend_host(struct sdhci_host *host)
3247 {
3248         sdhci_disable_card_detection(host);
3249
3250         mmc_retune_timer_stop(host->mmc);
3251
3252         if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3253             !sdhci_enable_irq_wakeups(host)) {
3254                 host->ier = 0;
3255                 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3256                 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3257                 free_irq(host->irq, host);
3258         }
3259
3260         return 0;
3261 }
3263 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3264
3265 int sdhci_resume_host(struct sdhci_host *host)
3266 {
3267         struct mmc_host *mmc = host->mmc;
3268         int ret = 0;
3269
3270         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3271                 if (host->ops->enable_dma)
3272                         host->ops->enable_dma(host);
3273         }
3274
3275         if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3276             (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3277                 /* Card keeps power but host controller does not */
3278                 sdhci_init(host, 0);
3279                 host->pwr = 0;
3280                 host->clock = 0;
3281                 mmc->ops->set_ios(mmc, &mmc->ios);
3282         } else {
3283                 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3284         }
3285
3286         if (host->irq_wake_enabled) {
3287                 sdhci_disable_irq_wakeups(host);
3288         } else {
3289                 ret = request_threaded_irq(host->irq, sdhci_irq,
3290                                            sdhci_thread_irq, IRQF_SHARED,
3291                                            mmc_hostname(host->mmc), host);
3292                 if (ret)
3293                         return ret;
3294         }
3295
3296         sdhci_enable_card_detection(host);
3297
3298         return ret;
3299 }
3301 EXPORT_SYMBOL_GPL(sdhci_resume_host);
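
/*
 * A minimal sketch (not part of this file) of how a glue driver might
 * wire the two helpers above into its system sleep callbacks; the
 * "foo" names are hypothetical, and drvdata is assumed to point at the
 * sdhci_host, as platform glue drivers conventionally arrange.
 */
#ifdef CONFIG_PM_SLEEP
static int sdhci_foo_suspend(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_suspend_host(host);
}

static int sdhci_foo_resume(struct device *dev)
{
        struct sdhci_host *host = dev_get_drvdata(dev);

        return sdhci_resume_host(host);
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_foo_pm_ops, sdhci_foo_suspend,
                         sdhci_foo_resume);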
3302
3303 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3304 {
3305         unsigned long flags;
3306
3307         mmc_retune_timer_stop(host->mmc);
3308
3309         spin_lock_irqsave(&host->lock, flags);
3310         host->ier &= SDHCI_INT_CARD_INT;
3311         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3312         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3313         spin_unlock_irqrestore(&host->lock, flags);
3314
3315         synchronize_hardirq(host->irq);
3316
3317         spin_lock_irqsave(&host->lock, flags);
3318         host->runtime_suspended = true;
3319         spin_unlock_irqrestore(&host->lock, flags);
3320
3321         return 0;
3322 }
3323 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3324
3325 int sdhci_runtime_resume_host(struct sdhci_host *host)
3326 {
3327         struct mmc_host *mmc = host->mmc;
3328         unsigned long flags;
3329         int host_flags = host->flags;
3330
3331         if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3332                 if (host->ops->enable_dma)
3333                         host->ops->enable_dma(host);
3334         }
3335
3336         sdhci_init(host, 0);
3337
3338         if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3339             mmc->ios.power_mode != MMC_POWER_OFF) {
3340                 /* Force clock and power re-program */
3341                 host->pwr = 0;
3342                 host->clock = 0;
3343                 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3344                 mmc->ops->set_ios(mmc, &mmc->ios);
3345
3346                 if ((host_flags & SDHCI_PV_ENABLED) &&
3347                     !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3348                         spin_lock_irqsave(&host->lock, flags);
3349                         sdhci_enable_preset_value(host, true);
3350                         spin_unlock_irqrestore(&host->lock, flags);
3351                 }
3352
3353                 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3354                     mmc->ops->hs400_enhanced_strobe)
3355                         mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3356         }
3357
3358         spin_lock_irqsave(&host->lock, flags);
3359
3360         host->runtime_suspended = false;
3361
3362         /* Enable SDIO IRQ */
3363         if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3364                 sdhci_enable_sdio_irq_nolock(host, true);
3365
3366         /* Enable Card Detection */
3367         sdhci_enable_card_detection(host);
3368
3369         spin_unlock_irqrestore(&host->lock, flags);
3370
3371         return 0;
3372 }
3373 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3374
3375 #endif /* CONFIG_PM */
3376
3377 /*****************************************************************************\
3378  *                                                                           *
3379  * Command Queue Engine (CQE) helpers                                        *
3380  *                                                                           *
3381 \*****************************************************************************/
3382
3383 void sdhci_cqe_enable(struct mmc_host *mmc)
3384 {
3385         struct sdhci_host *host = mmc_priv(mmc);
3386         unsigned long flags;
3387         u8 ctrl;
3388
3389         spin_lock_irqsave(&host->lock, flags);
3390
3391         ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3392         ctrl &= ~SDHCI_CTRL_DMA_MASK;
3393         /*
3394          * Hosts from v4.10 onwards support the ADMA3 DMA type.  ADMA3
3395          * uses integrated descriptors, which suit command queuing better
3396          * since command and transfer descriptors are fetched together.
3397          */
3398         if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3399                 ctrl |= SDHCI_CTRL_ADMA3;
3400         else if (host->flags & SDHCI_USE_64_BIT_DMA)
3401                 ctrl |= SDHCI_CTRL_ADMA64;
3402         else
3403                 ctrl |= SDHCI_CTRL_ADMA32;
3404         sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3405
3406         sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3407                      SDHCI_BLOCK_SIZE);
3408
3409         /* Set maximum timeout */
3410         sdhci_set_timeout(host, NULL);
3411
3412         host->ier = host->cqe_ier;
3413
3414         sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3415         sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3416
3417         host->cqe_on = true;
3418
3419         pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3420                  mmc_hostname(mmc), host->ier,
3421                  sdhci_readl(host, SDHCI_INT_STATUS));
3422
3423         spin_unlock_irqrestore(&host->lock, flags);
3424 }
3425 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3426
3427 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3428 {
3429         struct sdhci_host *host = mmc_priv(mmc);
3430         unsigned long flags;
3431
3432         spin_lock_irqsave(&host->lock, flags);
3433
3434         sdhci_set_default_irqs(host);
3435
3436         host->cqe_on = false;
3437
3438         if (recovery) {
3439                 sdhci_do_reset(host, SDHCI_RESET_CMD);
3440                 sdhci_do_reset(host, SDHCI_RESET_DATA);
3441         }
3442
3443         pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3444                  mmc_hostname(mmc), host->ier,
3445                  sdhci_readl(host, SDHCI_INT_STATUS));
3446
3447         spin_unlock_irqrestore(&host->lock, flags);
3448 }
3449 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3450
3451 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3452                    int *data_error)
3453 {
3454         u32 mask;
3455
3456         if (!host->cqe_on)
3457                 return false;
3458
3459         if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3460                 *cmd_error = -EILSEQ;
3461         else if (intmask & SDHCI_INT_TIMEOUT)
3462                 *cmd_error = -ETIMEDOUT;
3463         else
3464                 *cmd_error = 0;
3465
3466         if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3467                 *data_error = -EILSEQ;
3468         else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3469                 *data_error = -ETIMEDOUT;
3470         else if (intmask & SDHCI_INT_ADMA_ERROR)
3471                 *data_error = -EIO;
3472         else
3473                 *data_error = 0;
3474
3475         /* Clear selected interrupts. */
3476         mask = intmask & host->cqe_ier;
3477         sdhci_writel(host, mask, SDHCI_INT_STATUS);
3478
3479         if (intmask & SDHCI_INT_BUS_POWER)
3480                 pr_err("%s: Card is consuming too much power!\n",
3481                        mmc_hostname(host->mmc));
3482
3483         intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3484         if (intmask) {
3485                 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3486                 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3487                        mmc_hostname(host->mmc), intmask);
3488                 sdhci_dumpregs(host);
3489         }
3490
3491         return true;
3492 }
3493 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
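
/*
 * For reference, a sketch of how a CQE-capable glue driver typically
 * consumes sdhci_cqe_irq() from its ->irq hook.  It is guarded out
 * because cqhci_irq() lives in the separate CQHCI library ("cqhci.h"),
 * which this file does not include; the function name is hypothetical.
 */
#if 0
static u32 sdhci_foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
        int cmd_error = 0;
        int data_error = 0;

        /* Not a CQE interrupt: let the generic handler process it. */
        if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
                return intmask;

        /* CQE interrupt: hand the decoded errors to the CQHCI layer. */
        cqhci_irq(host->mmc, intmask, cmd_error, data_error);

        return 0;
}
#endif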
3494
3495 /*****************************************************************************\
3496  *                                                                           *
3497  * Device allocation/registration                                            *
3498  *                                                                           *
3499 \*****************************************************************************/
3500
3501 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3502         size_t priv_size)
3503 {
3504         struct mmc_host *mmc;
3505         struct sdhci_host *host;
3506
3507         WARN_ON(dev == NULL);
3508
3509         mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3510         if (!mmc)
3511                 return ERR_PTR(-ENOMEM);
3512
3513         host = mmc_priv(mmc);
3514         host->mmc = mmc;
3515         host->mmc_host_ops = sdhci_ops;
3516         mmc->ops = &host->mmc_host_ops;
3517
3518         host->flags = SDHCI_SIGNALING_330;
3519
3520         host->cqe_ier     = SDHCI_CQE_INT_MASK;
3521         host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3522
3523         host->tuning_delay = -1;
3524         host->tuning_loop_count = MAX_TUNING_LOOP;
3525
3526         host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3527
3528         /*
3529          * The DMA table descriptor count is calculated as the maximum
3530          * number of segments times 2, to allow for an alignment
3531          * descriptor for each segment, plus 1 for a nop end descriptor.
3532          */
3533         host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3534
3535         return host;
3536 }
3538 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
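
/*
 * A minimal sketch of the allocation pattern glue drivers use with the
 * helper above: private data of priv_size bytes lives directly behind
 * struct sdhci_host and is reached via sdhci_priv().  "sdhci_foo" is a
 * hypothetical driver-private structure.
 */
struct sdhci_foo {
        struct clk *clk;        /* e.g. a controller clock */
};

static struct sdhci_host * __maybe_unused sdhci_foo_alloc(struct device *dev)
{
        struct sdhci_host *host;
        struct sdhci_foo *foo;

        host = sdhci_alloc_host(dev, sizeof(*foo));
        if (IS_ERR(host))
                return host;

        foo = sdhci_priv(host); /* points at the priv_size area */
        foo->clk = NULL;        /* to be filled in by probe */

        return host;
}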
3539
3540 static int sdhci_set_dma_mask(struct sdhci_host *host)
3541 {
3542         struct mmc_host *mmc = host->mmc;
3543         struct device *dev = mmc_dev(mmc);
3544         int ret = -EINVAL;
3545
3546         if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3547                 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3548
3549         /* Try 64-bit mask if hardware is capable of it */
3550         if (host->flags & SDHCI_USE_64_BIT_DMA) {
3551                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3552                 if (ret) {
3553                         pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3554                                 mmc_hostname(mmc));
3555                         host->flags &= ~SDHCI_USE_64_BIT_DMA;
3556                 }
3557         }
3558
3559         /* 32-bit mask as default & fallback */
3560         if (ret) {
3561                 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3562                 if (ret)
3563                         pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3564                                 mmc_hostname(mmc));
3565         }
3566
3567         return ret;
3568 }
3569
3570 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3571 {
3572         u16 v;
3573         u64 dt_caps_mask = 0;
3574         u64 dt_caps = 0;
3575
3576         if (host->read_caps)
3577                 return;
3578
3579         host->read_caps = true;
3580
3581         if (debug_quirks)
3582                 host->quirks = debug_quirks;
3583
3584         if (debug_quirks2)
3585                 host->quirks2 = debug_quirks2;
3586
3587         sdhci_do_reset(host, SDHCI_RESET_ALL);
3588
3589         if (host->v4_mode)
3590                 sdhci_do_enable_v4_mode(host);
3591
3592         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3593                              "sdhci-caps-mask", &dt_caps_mask);
3594         of_property_read_u64(mmc_dev(host->mmc)->of_node,
3595                              "sdhci-caps", &dt_caps);
3596
3597         v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3598         host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3599
3600         if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3601                 return;
3602
3603         if (caps) {
3604                 host->caps = *caps;
3605         } else {
3606                 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3607                 host->caps &= ~lower_32_bits(dt_caps_mask);
3608                 host->caps |= lower_32_bits(dt_caps);
3609         }
3610
3611         if (host->version < SDHCI_SPEC_300)
3612                 return;
3613
3614         if (caps1) {
3615                 host->caps1 = *caps1;
3616         } else {
3617                 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3618                 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3619                 host->caps1 |= upper_32_bits(dt_caps);
3620         }
3621 }
3622 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
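
/*
 * Illustrative device-tree usage of the properties read above, assuming
 * the usual cell order in which the first cell holds the upper 32 bits
 * (CAPABILITIES_1): masking out bit 13 of CAPABILITIES_1 (DDR50 support)
 * would look like
 *
 *        sdhci-caps-mask = <0x00002000 0x00000000>;
 */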
3623
3624 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3625 {
3626         struct mmc_host *mmc = host->mmc;
3627         unsigned int max_blocks;
3628         unsigned int bounce_size;
3629         int ret;
3630
3631         /*
3632          * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3633          * has diminishing returns, probably because SD/MMC cards are
3634          * usually optimized for requests of this size.
3635          */
3636         bounce_size = SZ_64K;
3637         /*
3638          * Adjust the bounce buffer size down to the maximum request
3639          * size if that is smaller, so we never allocate more than a
3640          * single request can use.
3641          */
3642         if (mmc->max_req_size < bounce_size)
3643                 bounce_size = mmc->max_req_size;
3644         max_blocks = bounce_size / 512;
3645
3646         /*
3647          * When we support just one segment, we can get significant
3648          * speedups with the help of a bounce buffer that groups
3649          * scattered reads/writes together.
3650          */
3651         host->bounce_buffer = devm_kmalloc(mmc->parent,
3652                                            bounce_size,
3653                                            GFP_KERNEL);
3654         if (!host->bounce_buffer) {
3655                 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3656                        mmc_hostname(mmc),
3657                        bounce_size);
3658                 /*
3659                  * Exiting with zero here makes sure we proceed with
3660                  * mmc->max_segs == 1.
3661                  */
3662                 return;
3663         }
3664
3665         host->bounce_addr = dma_map_single(mmc->parent,
3666                                            host->bounce_buffer,
3667                                            bounce_size,
3668                                            DMA_BIDIRECTIONAL);
3669         ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3670         if (ret)
3671                 /* Again fall back to max_segs == 1 */
3672                 return;
3673         host->bounce_buffer_size = bounce_size;
3674
3675         /* Lie about this since we're bouncing */
3676         mmc->max_segs = max_blocks;
3677         mmc->max_seg_size = bounce_size;
3678         mmc->max_req_size = bounce_size;
3679
3680         pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3681                 mmc_hostname(mmc), max_blocks, bounce_size);
3682 }
3683
3684 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3685 {
3686         /*
3687          * According to SD Host Controller spec v4.10, bit[27] added from
3688          * version 4.10 in Capabilities Register is used as 64-bit System
3689          * Address support for V4 mode.
3690          */
3691         if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3692                 return host->caps & SDHCI_CAN_64BIT_V4;
3693
3694         return host->caps & SDHCI_CAN_64BIT;
3695 }
3696
3697 int sdhci_setup_host(struct sdhci_host *host)
3698 {
3699         struct mmc_host *mmc;
3700         u32 max_current_caps;
3701         unsigned int ocr_avail;
3702         unsigned int override_timeout_clk;
3703         u32 max_clk;
3704         int ret;
3705
3706         WARN_ON(host == NULL);
3707         if (host == NULL)
3708                 return -EINVAL;
3709
3710         mmc = host->mmc;
3711
3712         /*
3713          * If there are external regulators, get them. Note this must be done
3714          * early before resetting the host and reading the capabilities so that
3715          * the host can take the appropriate action if regulators are not
3716          * available.
3717          */
3718         ret = mmc_regulator_get_supply(mmc);
3719         if (ret)
3720                 return ret;
3721
3722         DBG("Version:   0x%08x | Present:  0x%08x\n",
3723             sdhci_readw(host, SDHCI_HOST_VERSION),
3724             sdhci_readl(host, SDHCI_PRESENT_STATE));
3725         DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3726             sdhci_readl(host, SDHCI_CAPABILITIES),
3727             sdhci_readl(host, SDHCI_CAPABILITIES_1));
3728
3729         sdhci_read_caps(host);
3730
3731         override_timeout_clk = host->timeout_clk;
3732
3733         if (host->version > SDHCI_SPEC_420) {
3734                 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3735                        mmc_hostname(mmc), host->version);
3736         }
3737
3738         if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3739                 host->flags |= SDHCI_USE_SDMA;
3740         else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3741                 DBG("Controller doesn't have SDMA capability\n");
3742         else
3743                 host->flags |= SDHCI_USE_SDMA;
3744
3745         if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3746                 (host->flags & SDHCI_USE_SDMA)) {
3747                 DBG("Disabling DMA as it is marked broken\n");
3748                 host->flags &= ~SDHCI_USE_SDMA;
3749         }
3750
3751         if ((host->version >= SDHCI_SPEC_200) &&
3752                 (host->caps & SDHCI_CAN_DO_ADMA2))
3753                 host->flags |= SDHCI_USE_ADMA;
3754
3755         if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3756                 (host->flags & SDHCI_USE_ADMA)) {
3757                 DBG("Disabling ADMA as it is marked broken\n");
3758                 host->flags &= ~SDHCI_USE_ADMA;
3759         }
3760
3761         /*
3762          * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3763          * and *must* do 64-bit DMA.  A driver has the opportunity to change
3764          * that during the first call to ->enable_dma().  Similarly
3765          * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3766          * implement.
3767          */
3768         if (sdhci_can_64bit_dma(host))
3769                 host->flags |= SDHCI_USE_64_BIT_DMA;
3770
3771         if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3772                 ret = sdhci_set_dma_mask(host);
3773
3774                 if (!ret && host->ops->enable_dma)
3775                         ret = host->ops->enable_dma(host);
3776
3777                 if (ret) {
3778                         pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3779                                 mmc_hostname(mmc));
3780                         host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3781
3782                         ret = 0;
3783                 }
3784         }
3785
3786         /* SDMA does not support 64-bit DMA unless v4 mode is set */
3787         if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
3788                 host->flags &= ~SDHCI_USE_SDMA;
3789
3790         if (host->flags & SDHCI_USE_ADMA) {
3791                 dma_addr_t dma;
3792                 void *buf;
3793
3794                 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3795                         host->adma_table_sz = host->adma_table_cnt *
3796                                               SDHCI_ADMA2_64_DESC_SZ(host);
3797                         host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
3798                 } else {
3799                         host->adma_table_sz = host->adma_table_cnt *
3800                                               SDHCI_ADMA2_32_DESC_SZ;
3801                         host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3802                 }
3803
3804                 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3805                 /*
3806                  * The allocation is zeroed, so the reserved high 32-bits
3807                  * of 128-bit descriptors never need to be written.
3808                  */
3809                 buf = dma_alloc_coherent(mmc_dev(mmc),
3810                                          host->align_buffer_sz + host->adma_table_sz,
3811                                          &dma, GFP_KERNEL);
3812                 if (!buf) {
3813                         pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3814                                 mmc_hostname(mmc));
3815                         host->flags &= ~SDHCI_USE_ADMA;
3816                 } else if ((dma + host->align_buffer_sz) &
3817                            (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3818                         pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3819                                 mmc_hostname(mmc));
3820                         host->flags &= ~SDHCI_USE_ADMA;
3821                         dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3822                                           host->adma_table_sz, buf, dma);
3823                 } else {
3824                         host->align_buffer = buf;
3825                         host->align_addr = dma;
3826
3827                         host->adma_table = buf + host->align_buffer_sz;
3828                         host->adma_addr = dma + host->align_buffer_sz;
3829                 }
3830         }
3831
3832         /*
3833          * If we use DMA, then it's up to the caller to set the DMA
3834          * mask, but PIO does not need the hw shim so we set a new
3835          * mask here in that case.
3836          */
3837         if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3838                 host->dma_mask = DMA_BIT_MASK(64);
3839                 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3840         }
3841
3842         if (host->version >= SDHCI_SPEC_300)
3843                 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3844                         >> SDHCI_CLOCK_BASE_SHIFT;
3845         else
3846                 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3847                         >> SDHCI_CLOCK_BASE_SHIFT;
3848
3849         host->max_clk *= 1000000;
3850         if (host->max_clk == 0 || host->quirks &
3851                         SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3852                 if (!host->ops->get_max_clock) {
3853                         pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3854                                mmc_hostname(mmc));
3855                         ret = -ENODEV;
3856                         goto undma;
3857                 }
3858                 host->max_clk = host->ops->get_max_clock(host);
3859         }
3860
3861         /*
3862          * In case of Host Controller v3.00, find out whether clock
3863          * multiplier is supported.
3864          */
3865         host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3866                         SDHCI_CLOCK_MUL_SHIFT;
3867
3868         /*
3869          * If the value in Clock Multiplier is 0, programmable clock
3870          * mode is not supported; otherwise the actual clock multiplier
3871          * is one more than the value of Clock Multiplier in the
3872          * Capabilities Register.
3873          */
3874         if (host->clk_mul)
3875                 host->clk_mul += 1;
3876
3877         /*
3878          * Set host parameters.
3879          */
3880         max_clk = host->max_clk;
3881
3882         if (host->ops->get_min_clock)
3883                 mmc->f_min = host->ops->get_min_clock(host);
3884         else if (host->version >= SDHCI_SPEC_300) {
3885                 if (host->clk_mul) {
3886                         mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3887                         max_clk = host->max_clk * host->clk_mul;
3888                 } else
3889                         mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3890         } else
3891                 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3892
3893         if (!mmc->f_max || mmc->f_max > max_clk)
3894                 mmc->f_max = max_clk;
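
        /*
         * Worked example with illustrative numbers: a 100 MHz base clock
         * and a Clock Multiplier field of 3 give clk_mul = 4, so
         * programmable clock mode allows f_min = 100 MHz * 4 / 1024
         * (about 390 kHz) while the f_max ceiling becomes 400 MHz.
         */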
3895
3896         if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3897                 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3898                                         SDHCI_TIMEOUT_CLK_SHIFT;
3899
3900                 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3901                         host->timeout_clk *= 1000;
3902
3903                 if (host->timeout_clk == 0) {
3904                         if (!host->ops->get_timeout_clock) {
3905                                 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3906                                         mmc_hostname(mmc));
3907                                 ret = -ENODEV;
3908                                 goto undma;
3909                         }
3910
3911                         host->timeout_clk =
3912                                 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3913                                              1000);
3914                 }
3915
3916                 if (override_timeout_clk)
3917                         host->timeout_clk = override_timeout_clk;
3918
3919                 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3920                         host->ops->get_max_timeout_count(host) : 1 << 27;
3921                 mmc->max_busy_timeout /= host->timeout_clk;
3922         }
3923
3924         if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3925             !host->ops->get_max_timeout_count)
3926                 mmc->max_busy_timeout = 0;
3927
3928         mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3929         mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3930
3931         if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3932                 host->flags |= SDHCI_AUTO_CMD12;
3933
3934         /*
3935          * For v3 mode, Auto-CMD23 only works in ADMA or PIO mode.
3936          * For v4 mode, SDMA may use Auto-CMD23 as well.
3937          */
3938         if ((host->version >= SDHCI_SPEC_300) &&
3939             ((host->flags & SDHCI_USE_ADMA) ||
3940              !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
3941              !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3942                 host->flags |= SDHCI_AUTO_CMD23;
3943                 DBG("Auto-CMD23 available\n");
3944         } else {
3945                 DBG("Auto-CMD23 unavailable\n");
3946         }
3947
3948         /*
3949          * A controller may support 8-bit width, but the board itself
3950          * might not have the pins brought out.  Boards that support
3951          * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3952          * their platform code before calling sdhci_add_host(), and we
3953          * won't assume 8-bit width for hosts without that CAP.
3954          */
3955         if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3956                 mmc->caps |= MMC_CAP_4_BIT_DATA;
3957
3958         if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3959                 mmc->caps &= ~MMC_CAP_CMD23;
3960
3961         if (host->caps & SDHCI_CAN_DO_HISPD)
3962                 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3963
3964         if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3965             mmc_card_is_removable(mmc) &&
3966             mmc_gpio_get_cd(host->mmc) < 0)
3967                 mmc->caps |= MMC_CAP_NEEDS_POLL;
3968
3969         if (!IS_ERR(mmc->supply.vqmmc)) {
3970                 ret = regulator_enable(mmc->supply.vqmmc);
3971
3972                 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3973                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3974                                                     1950000))
3975                         host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3976                                          SDHCI_SUPPORT_SDR50 |
3977                                          SDHCI_SUPPORT_DDR50);
3978
3979                 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
3980                 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3981                                                     3600000))
3982                         host->flags &= ~SDHCI_SIGNALING_330;
3983
3984                 if (ret) {
3985                         pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3986                                 mmc_hostname(mmc), ret);
3987                         mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3988                 }
3989         }
3990
3991         if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3992                 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3993                                  SDHCI_SUPPORT_DDR50);
3994                 /*
3995                  * The SDHCI controller in a SoC might support HS200/HS400
3996                  * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
3997                  * but if the board is modeled such that the IO lines are not
3998                  * connected to 1.8v then HS200/HS400 cannot be supported.
3999                  * Disable HS200/HS400 if the board does not have 1.8v connected
4000                  * to the IO lines. (Applicable for other modes in 1.8v)
4001                  */
4002                 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4003                 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4004         }
4005
4006         /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4007         if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4008                            SDHCI_SUPPORT_DDR50))
4009                 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4010
4011         /* SDR104 support also implies SDR50 support */
4012         if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4013                 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4014                 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4015                  * field can be promoted to support HS200.
4016                  */
4017                 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4018                         mmc->caps2 |= MMC_CAP2_HS200;
4019         } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4020                 mmc->caps |= MMC_CAP_UHS_SDR50;
4021         }
4022
4023         if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4024             (host->caps1 & SDHCI_SUPPORT_HS400))
4025                 mmc->caps2 |= MMC_CAP2_HS400;
4026
4027         if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4028             (IS_ERR(mmc->supply.vqmmc) ||
4029              !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4030                                              1300000)))
4031                 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4032
4033         if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4034             !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4035                 mmc->caps |= MMC_CAP_UHS_DDR50;
4036
4037         /* Does the host need tuning for SDR50? */
4038         if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4039                 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4040
4041         /* Driver Type(s) (A, C, D) supported by the host */
4042         if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4043                 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4044         if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4045                 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4046         if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4047                 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4048
4049         /* Initial value for re-tuning timer count */
4050         host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4051                              SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4052
4053         /*
4054          * In case Re-tuning Timer is not disabled, the actual value of
4055          * re-tuning timer will be 2 ^ (n - 1).
4056          */
4057         if (host->tuning_count)
4058                 host->tuning_count = 1 << (host->tuning_count - 1);
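        /* e.g. a field value of 4 yields a 1 << 3 = 8 second re-tuning period */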
4059
4060         /* Re-tuning mode supported by the Host Controller */
4061         host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4062                              SDHCI_RETUNING_MODE_SHIFT;
4063
4064         ocr_avail = 0;
4065
4066         /*
4067          * According to SD Host Controller spec v3.00, if the Host System
4068          * can afford more than 150mA, Host Driver should set XPC to 1. Also
4069          * the value is meaningful only if Voltage Support in the Capabilities
4070          * register is set. The actual current value is 4 times the register
4071          * value.
4072          */
4073         max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4074         if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4075                 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4076                 if (curr > 0) {
4077
4078                         /* convert to SDHCI_MAX_CURRENT format */
4079                         curr = curr/1000;  /* convert to mA */
4080                         curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4081
4082                         curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4083                         max_current_caps =
4084                                 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4085                                 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4086                                 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4087                 }
4088         }
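
        /*
         * Worked example of the conversion above: a regulator limit of
         * 800000 uA becomes 800 mA and then a register-format value of
         * 800 / 4 = 200; the decoding below multiplies by the same
         * factor, giving 200 * 4 = 800 mA.
         */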
4089
4090         if (host->caps & SDHCI_CAN_VDD_330) {
4091                 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4092
4093                 mmc->max_current_330 = ((max_current_caps &
4094                                    SDHCI_MAX_CURRENT_330_MASK) >>
4095                                    SDHCI_MAX_CURRENT_330_SHIFT) *
4096                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4097         }
4098         if (host->caps & SDHCI_CAN_VDD_300) {
4099                 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4100
4101                 mmc->max_current_300 = ((max_current_caps &
4102                                    SDHCI_MAX_CURRENT_300_MASK) >>
4103                                    SDHCI_MAX_CURRENT_300_SHIFT) *
4104                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4105         }
4106         if (host->caps & SDHCI_CAN_VDD_180) {
4107                 ocr_avail |= MMC_VDD_165_195;
4108
4109                 mmc->max_current_180 = ((max_current_caps &
4110                                    SDHCI_MAX_CURRENT_180_MASK) >>
4111                                    SDHCI_MAX_CURRENT_180_SHIFT) *
4112                                    SDHCI_MAX_CURRENT_MULTIPLIER;
4113         }
4114
4115         /* If OCR set by host, use it instead. */
4116         if (host->ocr_mask)
4117                 ocr_avail = host->ocr_mask;
4118
4119         /* If OCR set by external regulators, give it highest prio. */
4120         if (mmc->ocr_avail)
4121                 ocr_avail = mmc->ocr_avail;
4122
4123         mmc->ocr_avail = ocr_avail;
4124         mmc->ocr_avail_sdio = ocr_avail;
4125         if (host->ocr_avail_sdio)
4126                 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4127         mmc->ocr_avail_sd = ocr_avail;
4128         if (host->ocr_avail_sd)
4129                 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4130         else /* normal SD controllers don't support 1.8V */
4131                 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4132         mmc->ocr_avail_mmc = ocr_avail;
4133         if (host->ocr_avail_mmc)
4134                 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4135
4136         if (mmc->ocr_avail == 0) {
4137                 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4138                        mmc_hostname(mmc));
4139                 ret = -ENODEV;
4140                 goto unreg;
4141         }
4142
4143         if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4144                           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4145                           MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4146             (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4147                 host->flags |= SDHCI_SIGNALING_180;
4148
4149         if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4150                 host->flags |= SDHCI_SIGNALING_120;
4151
4152         spin_lock_init(&host->lock);
4153
4154         /*
4155          * Maximum number of sectors in one transfer. Limited by SDMA boundary
4156          * size (512KiB). Note some tuning modes impose a 4MiB limit, but
4157          * 512KiB is the smaller of the two anyway.
4158          */
4159         mmc->max_req_size = 524288;
4160
4161         /*
4162          * Maximum number of segments. Depends on if the hardware
4163          * can do scatter/gather or not.
4164          */
4165         if (host->flags & SDHCI_USE_ADMA) {
4166                 mmc->max_segs = SDHCI_MAX_SEGS;
4167         } else if (host->flags & SDHCI_USE_SDMA) {
4168                 mmc->max_segs = 1;
4169                 if (swiotlb_max_segment()) {
4170                         unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4171                                                 IO_TLB_SEGSIZE;
4172                         mmc->max_req_size = min(mmc->max_req_size,
4173                                                 max_req_size);
4174                 }
4175         } else { /* PIO */
4176                 mmc->max_segs = SDHCI_MAX_SEGS;
4177         }

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

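	/*
	 * The capability field encodes the maximum block size as a
	 * power-of-two multiple of 512 bytes: 0 -> 512, 1 -> 1024,
	 * 2 -> 2048 (the value forced by SDHCI_QUIRK_FORCE_BLK_SZ_2048).
	 */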
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

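	/*
	 * A single-segment (SDMA) host would otherwise force the block
	 * layer to split requests; a bounce buffer instead copies
	 * scattered pages through one contiguous DMA-able buffer.
	 */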
	if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
		sdhci_allocate_bounce_buffer(host);

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

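/*
 * Undo sdhci_setup_host(): release the vqmmc regulator and the DMA
 * descriptor/alignment buffers. Intended for error paths after a
 * successful sdhci_setup_host() but failed __sdhci_add_host().
 */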
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

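/*
 * Second registration stage, run after sdhci_setup_host(): create the
 * completion workqueue (WQ_HIGHPRI for latency, WQ_MEM_RECLAIM because
 * completions sit in the block I/O path), set up timers and the threaded
 * IRQ handler, then register the mmc_host with the core.
 */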
int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto unwq;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

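/*
 * sdhci_add_host() is the one-call registration path: sdhci_setup_host()
 * followed by __sdhci_add_host(), with cleanup on failure. A rough,
 * hypothetical sketch of a glue driver's probe (my_sdhci_ops and "my-sdhci"
 * are made-up names; real drivers usually build on the sdhci-pltfm or
 * sdhci-pci helpers instead):
 *
 *	host = sdhci_alloc_host(&pdev->dev, 0);
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev,
 *			platform_get_resource(pdev, IORESOURCE_MEM, 0));
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *	return ret;
 *
 * Drivers that must tweak mmc->caps or DMA settings between capability
 * parsing and registration call sdhci_setup_host() and __sdhci_add_host()
 * separately instead.
 */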
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

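/*
 * Tear down a registered host. @dead is non-zero when the controller has
 * already vanished (e.g. the device was unplugged mid-transfer): pending
 * requests are errored out with -ENOMEDIUM and the full reset, which would
 * touch dead hardware, is skipped.
 */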
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	destroy_workqueue(host->complete_wq);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

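/* Counterpart to sdhci_alloc_host(): frees the mmc_host and embedded sdhci_host. */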
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright (c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");