/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];
	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
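
/*
 * Example (illustrative sketch, not part of the driver): a caller polling
 * CMD13 via mmc_send_status() and decoding the R1 state field. The function
 * name and the 100 ms budget are made up for illustration; the caller is
 * assumed to already hold the host via mmc_claim_host().
 */
#if 0	/* usage sketch only */
static int example_wait_for_tran_state(struct mmc_card *card)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	u32 status;
	int err;

	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
		if (R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			return 0;
	} while (!time_after(jiffies, timeout));

	return -ETIMEDOUT;
}
#endif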
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;
	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76 (DSR_IMP).
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;
	return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;
		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else {
		mmc_set_data_timeout(&data, card);
	}

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return 0;
}
static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);
err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}
static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);
err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
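
/*
 * Example (illustrative sketch, not part of the driver): reading one field
 * out of the EXT_CSD block returned by mmc_get_ext_csd(). The caller owns
 * the returned 512-byte buffer and must kfree() it.
 */
#if 0	/* usage sketch only */
static int example_read_ext_csd_rev(struct mmc_card *card, u8 *rev)
{
	u8 *ext_csd;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	*rev = ext_csd[EXT_CSD_REV];	/* EXT_CSD byte 192 */
	kfree(ext_csd);
	return 0;
}
#endif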
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			     bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If polling with CMD13 is not allowed and the host is not capable of
	 * polling via ->card_busy(), rely on waiting out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);
		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or we used HW busy detection above, there is no need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
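
/*
 * Example (illustrative sketch, not part of the driver): a typical
 * mmc_switch() call programming a single EXT_CSD byte, here EXT_CSD_BUS_WIDTH,
 * using the card's generic CMD6 timeout. Claiming the host is assumed to be
 * handled by the caller.
 */
#if 0	/* usage sketch only */
static int example_set_bus_width_8(struct mmc_card *card)
{
	/* Write EXT_CSD[183] = 8-bit bus width. */
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			  EXT_CSD_BUS_WIDTH_8,
			  card->ext_csd.generic_cmd6_time);
}
#endif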
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;
	if (cmd.error) {
		err = cmd.error;
		goto out;
	}
	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;
out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
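
/*
 * Example (illustrative sketch, not part of the driver): how a host
 * controller driver's ->execute_tuning() callback typically drives
 * mmc_send_tuning(). The loop bound and my_host_set_sample_phase() are
 * hypothetical; real drivers step a controller-specific delay/phase setting
 * between attempts.
 */
#if 0	/* usage sketch only */
static int example_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int phase, err;

	for (phase = 0; phase < 16; phase++) {
		my_host_set_sample_phase(mmc, phase);	/* hypothetical */
		err = mmc_send_tuning(mmc, opcode, NULL);
		if (!err)
			return 0;	/* this phase samples the pattern correctly */
	}

	return -EIO;
}
#endif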
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err = 0;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);

	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
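
/*
 * Example (illustrative sketch, not part of the driver): how bus-width
 * probing can combine mmc_switch() and mmc_bus_test(), roughly mirroring
 * what the core does when a host advertises MMC_CAP_BUS_WIDTH_TEST. Only
 * the 4-bit case is shown.
 */
#if 0	/* usage sketch only */
static int example_try_4bit_bus(struct mmc_card *card)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_4,
			 card->ext_csd.generic_cmd6_time);
	if (err)
		return err;

	return mmc_bus_test(card, MMC_BUS_WIDTH_4);
}
#endif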
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation, HPI command response %#x\n",
			mmc_hostname(card->host), err, cmd.resp[0]);
		return err;
	}

	if (status)
		*status = cmd.resp[0];

	return 0;
}
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately.
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued; let the card
	 * complete the ongoing BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BKOPS_START, 1, timeout, 0,
			   use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) the card is kept busy
	 * until BKOPS completes, so it executes synchronously; otherwise the
	 * operation is left running in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
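
/*
 * Example (illustrative sketch, not part of the driver): flushing the card's
 * volatile cache before a power-down transition, as a suspend path might do.
 * The function name is made up; the claim/release of the host mirrors what
 * callers of mmc_flush_cache() are expected to do.
 */
#if 0	/* usage sketch only */
static int example_prepare_for_poweroff(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_flush_cache(card);
	mmc_release_host(card->host);

	return err;
}
#endif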
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
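
/*
 * Example (illustrative sketch, not part of the driver): callers that must
 * issue commands which are not allowed while command queueing is enabled
 * bracket the operation with mmc_cmdq_disable()/mmc_cmdq_enable(). The
 * do_non_cmdq_operation() helper is hypothetical.
 */
#if 0	/* usage sketch only */
static int example_with_cmdq_off(struct mmc_card *card)
{
	int err, ret;

	err = mmc_cmdq_disable(card);
	if (err)
		return err;

	ret = do_non_cmdq_operation(card);	/* hypothetical */

	err = mmc_cmdq_enable(card);
	return ret ? ret : err;
}
#endif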