2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
52 static unsigned int ata_busy_sleep (struct ata_port *ap,
53 unsigned long tmout_pat,
55 static void ata_set_mode(struct ata_port *ap);
56 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
57 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
58 static int fgb(u32 bitmap);
59 static int ata_choose_xfer_mode(struct ata_port *ap,
61 unsigned int *xfer_shift_out);
62 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
63 static void __ata_qc_complete(struct ata_queued_cmd *qc);
65 static unsigned int ata_unique_id = 1;
66 static struct workqueue_struct *ata_wq;
68 MODULE_AUTHOR("Jeff Garzik");
69 MODULE_DESCRIPTION("Library module for ATA devices");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_VERSION);
74 * ata_tf_load - send taskfile registers to host controller
75 * @ap: Port to which output is sent
76 * @tf: ATA taskfile register set
78 * Outputs ATA taskfile to standard ATA host controller.
81 * Inherited from caller.
84 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
86 struct ata_ioports *ioaddr = &ap->ioaddr;
87 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
89 if (tf->ctl != ap->last_ctl) {
90 outb(tf->ctl, ioaddr->ctl_addr);
91 ap->last_ctl = tf->ctl;
95 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
96 outb(tf->hob_feature, ioaddr->feature_addr);
97 outb(tf->hob_nsect, ioaddr->nsect_addr);
98 outb(tf->hob_lbal, ioaddr->lbal_addr);
99 outb(tf->hob_lbam, ioaddr->lbam_addr);
100 outb(tf->hob_lbah, ioaddr->lbah_addr);
101 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
110 outb(tf->feature, ioaddr->feature_addr);
111 outb(tf->nsect, ioaddr->nsect_addr);
112 outb(tf->lbal, ioaddr->lbal_addr);
113 outb(tf->lbam, ioaddr->lbam_addr);
114 outb(tf->lbah, ioaddr->lbah_addr);
115 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
123 if (tf->flags & ATA_TFLAG_DEVICE) {
124 outb(tf->device, ioaddr->device_addr);
125 VPRINTK("device 0x%X\n", tf->device);
132 * ata_tf_load_mmio - send taskfile registers to host controller
133 * @ap: Port to which output is sent
134 * @tf: ATA taskfile register set
136 * Outputs ATA taskfile to standard ATA host controller using MMIO.
139 * Inherited from caller.
142 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
144 struct ata_ioports *ioaddr = &ap->ioaddr;
145 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
147 if (tf->ctl != ap->last_ctl) {
148 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
149 ap->last_ctl = tf->ctl;
153 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
154 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
155 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
156 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
157 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
158 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
159 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
168 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
169 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
170 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
171 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
172 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
173 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
181 if (tf->flags & ATA_TFLAG_DEVICE) {
182 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
183 VPRINTK("device 0x%X\n", tf->device);
189 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
191 if (ap->flags & ATA_FLAG_MMIO)
192 ata_tf_load_mmio(ap, tf);
194 ata_tf_load_pio(ap, tf);
198 * ata_exec_command - issue ATA command to host controller
199 * @ap: port to which command is being issued
200 * @tf: ATA taskfile register set
202 * Issues PIO/MMIO write to ATA command register, with proper
203 * synchronization with interrupt handler / other threads.
206 * spin_lock_irqsave(host_set lock)
209 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
211 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
213 outb(tf->command, ap->ioaddr.command_addr);
219 * ata_exec_command_mmio - issue ATA command to host controller
220 * @ap: port to which command is being issued
221 * @tf: ATA taskfile register set
223 * Issues MMIO write to ATA command register, with proper
224 * synchronization with interrupt handler / other threads.
227 * spin_lock_irqsave(host_set lock)
230 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
232 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
234 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
238 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
240 if (ap->flags & ATA_FLAG_MMIO)
241 ata_exec_command_mmio(ap, tf);
243 ata_exec_command_pio(ap, tf);
247 * ata_exec - issue ATA command to host controller
248 * @ap: port to which command is being issued
249 * @tf: ATA taskfile register set
251 * Issues PIO/MMIO write to ATA command register, with proper
252 * synchronization with interrupt handler / other threads.
255 * Obtains host_set lock.
258 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
262 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
263 spin_lock_irqsave(&ap->host_set->lock, flags);
264 ap->ops->exec_command(ap, tf);
265 spin_unlock_irqrestore(&ap->host_set->lock, flags);
269 * ata_tf_to_host - issue ATA taskfile to host controller
270 * @ap: port to which command is being issued
271 * @tf: ATA taskfile register set
273 * Issues ATA taskfile register set to ATA host controller,
274 * with proper synchronization with interrupt handler and
278 * Obtains host_set lock.
281 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
283 ap->ops->tf_load(ap, tf);
289 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
290 * @ap: port to which command is being issued
291 * @tf: ATA taskfile register set
293 * Issues ATA taskfile register set to ATA host controller,
294 * with proper synchronization with interrupt handler and
298 * spin_lock_irqsave(host_set lock)
301 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
303 ap->ops->tf_load(ap, tf);
304 ap->ops->exec_command(ap, tf);
308 * ata_tf_read - input device's ATA taskfile shadow registers
309 * @ap: Port from which input is read
310 * @tf: ATA taskfile register set for storing input
312 * Reads ATA taskfile registers for currently-selected device
316 * Inherited from caller.
319 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
321 struct ata_ioports *ioaddr = &ap->ioaddr;
323 tf->nsect = inb(ioaddr->nsect_addr);
324 tf->lbal = inb(ioaddr->lbal_addr);
325 tf->lbam = inb(ioaddr->lbam_addr);
326 tf->lbah = inb(ioaddr->lbah_addr);
327 tf->device = inb(ioaddr->device_addr);
329 if (tf->flags & ATA_TFLAG_LBA48) {
330 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
331 tf->hob_feature = inb(ioaddr->error_addr);
332 tf->hob_nsect = inb(ioaddr->nsect_addr);
333 tf->hob_lbal = inb(ioaddr->lbal_addr);
334 tf->hob_lbam = inb(ioaddr->lbam_addr);
335 tf->hob_lbah = inb(ioaddr->lbah_addr);
340 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
341 * @ap: Port from which input is read
342 * @tf: ATA taskfile register set for storing input
344 * Reads ATA taskfile registers for currently-selected device
348 * Inherited from caller.
351 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
353 struct ata_ioports *ioaddr = &ap->ioaddr;
355 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
356 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
357 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
358 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
359 tf->device = readb((void __iomem *)ioaddr->device_addr);
361 if (tf->flags & ATA_TFLAG_LBA48) {
362 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
363 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
364 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
365 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
366 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
367 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
371 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
373 if (ap->flags & ATA_FLAG_MMIO)
374 ata_tf_read_mmio(ap, tf);
376 ata_tf_read_pio(ap, tf);
380 * ata_check_status_pio - Read device status reg & clear interrupt
381 * @ap: port where the device is
383 * Reads ATA taskfile status register for currently-selected device
384 * and return it's value. This also clears pending interrupts
388 * Inherited from caller.
390 static u8 ata_check_status_pio(struct ata_port *ap)
392 return inb(ap->ioaddr.status_addr);
396 * ata_check_status_mmio - Read device status reg & clear interrupt
397 * @ap: port where the device is
399 * Reads ATA taskfile status register for currently-selected device
400 * via MMIO and return it's value. This also clears pending interrupts
404 * Inherited from caller.
406 static u8 ata_check_status_mmio(struct ata_port *ap)
408 return readb((void __iomem *) ap->ioaddr.status_addr);
411 u8 ata_check_status(struct ata_port *ap)
413 if (ap->flags & ATA_FLAG_MMIO)
414 return ata_check_status_mmio(ap);
415 return ata_check_status_pio(ap);
418 u8 ata_altstatus(struct ata_port *ap)
420 if (ap->ops->check_altstatus)
421 return ap->ops->check_altstatus(ap);
423 if (ap->flags & ATA_FLAG_MMIO)
424 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
425 return inb(ap->ioaddr.altstatus_addr);
428 u8 ata_chk_err(struct ata_port *ap)
430 if (ap->ops->check_err)
431 return ap->ops->check_err(ap);
433 if (ap->flags & ATA_FLAG_MMIO) {
434 return readb((void __iomem *) ap->ioaddr.error_addr);
436 return inb(ap->ioaddr.error_addr);
440 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
441 * @tf: Taskfile to convert
442 * @fis: Buffer into which data will output
443 * @pmp: Port multiplier port
445 * Converts a standard ATA taskfile to a Serial ATA
446 * FIS structure (Register - Host to Device).
449 * Inherited from caller.
452 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
454 fis[0] = 0x27; /* Register - Host to Device FIS */
455 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
456 bit 7 indicates Command FIS */
457 fis[2] = tf->command;
458 fis[3] = tf->feature;
465 fis[8] = tf->hob_lbal;
466 fis[9] = tf->hob_lbam;
467 fis[10] = tf->hob_lbah;
468 fis[11] = tf->hob_feature;
471 fis[13] = tf->hob_nsect;
482 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
483 * @fis: Buffer from which data will be input
484 * @tf: Taskfile to output
486 * Converts a standard ATA taskfile to a Serial ATA
487 * FIS structure (Register - Host to Device).
490 * Inherited from caller.
493 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
495 tf->command = fis[2]; /* status */
496 tf->feature = fis[3]; /* error */
503 tf->hob_lbal = fis[8];
504 tf->hob_lbam = fis[9];
505 tf->hob_lbah = fis[10];
508 tf->hob_nsect = fis[13];
512 * ata_prot_to_cmd - determine which read/write opcodes to use
513 * @protocol: ATA_PROT_xxx taskfile protocol
514 * @lba48: true is lba48 is present
516 * Given necessary input, determine which read/write commands
517 * to use to transfer data.
522 static int ata_prot_to_cmd(int protocol, int lba48)
524 int rcmd = 0, wcmd = 0;
529 rcmd = ATA_CMD_PIO_READ_EXT;
530 wcmd = ATA_CMD_PIO_WRITE_EXT;
532 rcmd = ATA_CMD_PIO_READ;
533 wcmd = ATA_CMD_PIO_WRITE;
539 rcmd = ATA_CMD_READ_EXT;
540 wcmd = ATA_CMD_WRITE_EXT;
543 wcmd = ATA_CMD_WRITE;
551 return rcmd | (wcmd << 8);
555 * ata_dev_set_protocol - set taskfile protocol and r/w commands
556 * @dev: device to examine and configure
558 * Examine the device configuration, after we have
559 * read the identify-device page and configured the
560 * data transfer mode. Set internal state related to
561 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
562 * and calculate the proper read/write commands to use.
567 static void ata_dev_set_protocol(struct ata_device *dev)
569 int pio = (dev->flags & ATA_DFLAG_PIO);
570 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
574 proto = dev->xfer_protocol = ATA_PROT_PIO;
576 proto = dev->xfer_protocol = ATA_PROT_DMA;
578 cmd = ata_prot_to_cmd(proto, lba48);
582 dev->read_cmd = cmd & 0xff;
583 dev->write_cmd = (cmd >> 8) & 0xff;
586 static const char * xfer_mode_str[] = {
606 * ata_udma_string - convert UDMA bit offset to string
607 * @mask: mask of bits supported; only highest bit counts.
609 * Determine string which represents the highest speed
610 * (highest bit in @udma_mask).
616 * Constant C string representing highest speed listed in
617 * @udma_mask, or the constant C string "<n/a>".
620 static const char *ata_mode_string(unsigned int mask)
624 for (i = 7; i >= 0; i--)
627 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
630 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
637 return xfer_mode_str[i];
641 * ata_pio_devchk - PATA device presence detection
642 * @ap: ATA channel to examine
643 * @device: Device to examine (starting at zero)
645 * This technique was originally described in
646 * Hale Landis's ATADRVR (www.ata-atapi.com), and
647 * later found its way into the ATA/ATAPI spec.
649 * Write a pattern to the ATA shadow registers,
650 * and if a device is present, it will respond by
651 * correctly storing and echoing back the
652 * ATA shadow register contents.
658 static unsigned int ata_pio_devchk(struct ata_port *ap,
661 struct ata_ioports *ioaddr = &ap->ioaddr;
664 ap->ops->dev_select(ap, device);
666 outb(0x55, ioaddr->nsect_addr);
667 outb(0xaa, ioaddr->lbal_addr);
669 outb(0xaa, ioaddr->nsect_addr);
670 outb(0x55, ioaddr->lbal_addr);
672 outb(0x55, ioaddr->nsect_addr);
673 outb(0xaa, ioaddr->lbal_addr);
675 nsect = inb(ioaddr->nsect_addr);
676 lbal = inb(ioaddr->lbal_addr);
678 if ((nsect == 0x55) && (lbal == 0xaa))
679 return 1; /* we found a device */
681 return 0; /* nothing found */
685 * ata_mmio_devchk - PATA device presence detection
686 * @ap: ATA channel to examine
687 * @device: Device to examine (starting at zero)
689 * This technique was originally described in
690 * Hale Landis's ATADRVR (www.ata-atapi.com), and
691 * later found its way into the ATA/ATAPI spec.
693 * Write a pattern to the ATA shadow registers,
694 * and if a device is present, it will respond by
695 * correctly storing and echoing back the
696 * ATA shadow register contents.
702 static unsigned int ata_mmio_devchk(struct ata_port *ap,
705 struct ata_ioports *ioaddr = &ap->ioaddr;
708 ap->ops->dev_select(ap, device);
710 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
711 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
713 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
714 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
716 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
717 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
719 nsect = readb((void __iomem *) ioaddr->nsect_addr);
720 lbal = readb((void __iomem *) ioaddr->lbal_addr);
722 if ((nsect == 0x55) && (lbal == 0xaa))
723 return 1; /* we found a device */
725 return 0; /* nothing found */
729 * ata_devchk - PATA device presence detection
730 * @ap: ATA channel to examine
731 * @device: Device to examine (starting at zero)
733 * Dispatch ATA device presence detection, depending
734 * on whether we are using PIO or MMIO to talk to the
735 * ATA shadow registers.
741 static unsigned int ata_devchk(struct ata_port *ap,
744 if (ap->flags & ATA_FLAG_MMIO)
745 return ata_mmio_devchk(ap, device);
746 return ata_pio_devchk(ap, device);
750 * ata_dev_classify - determine device type based on ATA-spec signature
751 * @tf: ATA taskfile register set for device to be identified
753 * Determine from taskfile register contents whether a device is
754 * ATA or ATAPI, as per "Signature and persistence" section
755 * of ATA/PI spec (volume 1, sect 5.14).
761 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
762 * the event of failure.
765 unsigned int ata_dev_classify(struct ata_taskfile *tf)
767 /* Apple's open source Darwin code hints that some devices only
768 * put a proper signature into the LBA mid/high registers,
769 * So, we only check those. It's sufficient for uniqueness.
772 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
773 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
774 DPRINTK("found ATA device by sig\n");
778 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
779 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
780 DPRINTK("found ATAPI device by sig\n");
781 return ATA_DEV_ATAPI;
784 DPRINTK("unknown device\n");
785 return ATA_DEV_UNKNOWN;
789 * ata_dev_try_classify - Parse returned ATA device signature
790 * @ap: ATA channel to examine
791 * @device: Device to examine (starting at zero)
793 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
794 * an ATA/ATAPI-defined set of values is placed in the ATA
795 * shadow registers, indicating the results of device detection
798 * Select the ATA device, and read the values from the ATA shadow
799 * registers. Then parse according to the Error register value,
800 * and the spec-defined values examined by ata_dev_classify().
806 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
808 struct ata_device *dev = &ap->device[device];
809 struct ata_taskfile tf;
813 ap->ops->dev_select(ap, device);
815 memset(&tf, 0, sizeof(tf));
817 err = ata_chk_err(ap);
818 ap->ops->tf_read(ap, &tf);
820 dev->class = ATA_DEV_NONE;
822 /* see if device passed diags */
825 else if ((device == 0) && (err == 0x81))
830 /* determine if device if ATA or ATAPI */
831 class = ata_dev_classify(&tf);
832 if (class == ATA_DEV_UNKNOWN)
834 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
843 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
844 * @id: IDENTIFY DEVICE results we will examine
845 * @s: string into which data is output
846 * @ofs: offset into identify device page
847 * @len: length of string to return. must be an even number.
849 * The strings in the IDENTIFY DEVICE page are broken up into
850 * 16-bit chunks. Run through the string, and output each
851 * 8-bit chunk linearly, regardless of platform.
857 void ata_dev_id_string(u16 *id, unsigned char *s,
858 unsigned int ofs, unsigned int len)
/* No-op dev_select hook for controllers that need no device selection. */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
881 * ata_std_dev_select - Select device 0/1 on ATA bus
882 * @ap: ATA channel to manipulate
883 * @device: ATA device (numbered from zero) to select
885 * Use the method defined in the ATA specification to
886 * make either device 0, or device 1, active on the
893 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
898 tmp = ATA_DEVICE_OBS;
900 tmp = ATA_DEVICE_OBS | ATA_DEV1;
902 if (ap->flags & ATA_FLAG_MMIO) {
903 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
905 outb(tmp, ap->ioaddr.device_addr);
907 ata_pause(ap); /* needed; also flushes, for mmio */
911 * ata_dev_select - Select device 0/1 on ATA bus
912 * @ap: ATA channel to manipulate
913 * @device: ATA device (numbered from zero) to select
914 * @wait: non-zero to wait for Status register BSY bit to clear
915 * @can_sleep: non-zero if context allows sleeping
917 * Use the method defined in the ATA specification to
918 * make either device 0, or device 1, active on the
921 * This is a high-level version of ata_std_dev_select(),
922 * which additionally provides the services of inserting
923 * the proper pauses and status polling, where needed.
929 void ata_dev_select(struct ata_port *ap, unsigned int device,
930 unsigned int wait, unsigned int can_sleep)
932 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
933 ap->id, device, wait);
938 ap->ops->dev_select(ap, device);
941 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
948 * ata_dump_id - IDENTIFY DEVICE info debugging output
949 * @dev: Device whose IDENTIFY DEVICE page we will dump
951 * Dump selected 16-bit words from a detected device's
952 * IDENTIFY PAGE page.
958 static inline void ata_dump_id(struct ata_device *dev)
960 DPRINTK("49==0x%04x "
970 DPRINTK("80==0x%04x "
980 DPRINTK("88==0x%04x "
987 * ata_dev_identify - obtain IDENTIFY x DEVICE page
988 * @ap: port on which device we wish to probe resides
989 * @device: device bus address, starting at zero
991 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
992 * command, and read back the 512-byte device information page.
993 * The device information page is fed to us via the standard
994 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
995 * using standard PIO-IN paths)
997 * After reading the device information page, we use several
998 * bits of information from it to initialize data structures
999 * that will be used during the lifetime of the ata_device.
1000 * Other data from the info page is used to disqualify certain
1001 * older ATA devices we do not wish to support.
1004 * Inherited from caller. Some functions called by this function
1005 * obtain the host_set lock.
1008 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1010 struct ata_device *dev = &ap->device[device];
1013 unsigned long xfer_modes;
1015 unsigned int using_edd;
1016 DECLARE_COMPLETION(wait);
1017 struct ata_queued_cmd *qc;
1018 unsigned long flags;
1021 if (!ata_dev_present(dev)) {
1022 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1027 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1032 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1034 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1035 dev->class == ATA_DEV_NONE);
1037 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
1039 qc = ata_qc_new_init(ap, dev);
1042 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1043 qc->dma_dir = DMA_FROM_DEVICE;
1044 qc->tf.protocol = ATA_PROT_PIO;
1048 if (dev->class == ATA_DEV_ATA) {
1049 qc->tf.command = ATA_CMD_ID_ATA;
1050 DPRINTK("do ATA identify\n");
1052 qc->tf.command = ATA_CMD_ID_ATAPI;
1053 DPRINTK("do ATAPI identify\n");
1056 qc->waiting = &wait;
1057 qc->complete_fn = ata_qc_complete_noop;
1059 spin_lock_irqsave(&ap->host_set->lock, flags);
1060 rc = ata_qc_issue(qc);
1061 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1066 wait_for_completion(&wait);
1068 status = ata_chk_status(ap);
1069 if (status & ATA_ERR) {
1071 * arg! EDD works for all test cases, but seems to return
1072 * the ATA signature for some ATAPI devices. Until the
1073 * reason for this is found and fixed, we fix up the mess
1074 * here. If IDENTIFY DEVICE returns command aborted
1075 * (as ATAPI devices do), then we issue an
1076 * IDENTIFY PACKET DEVICE.
1078 * ATA software reset (SRST, the default) does not appear
1079 * to have this problem.
1081 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1082 u8 err = ata_chk_err(ap);
1083 if (err & ATA_ABORTED) {
1084 dev->class = ATA_DEV_ATAPI;
1095 swap_buf_le16(dev->id, ATA_ID_WORDS);
1097 /* print device capabilities */
1098 printk(KERN_DEBUG "ata%u: dev %u cfg "
1099 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1100 ap->id, device, dev->id[49],
1101 dev->id[82], dev->id[83], dev->id[84],
1102 dev->id[85], dev->id[86], dev->id[87],
1106 * common ATA, ATAPI feature tests
1109 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1110 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1111 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
1115 /* quick-n-dirty find max transfer mode; for printk only */
1116 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1118 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1120 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1121 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1126 /* ATA-specific feature tests */
1127 if (dev->class == ATA_DEV_ATA) {
1128 if (!ata_id_is_ata(dev->id)) /* sanity check */
1131 tmp = dev->id[ATA_ID_MAJOR_VER];
1132 for (i = 14; i >= 1; i--)
1136 /* we require at least ATA-3 */
1138 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
1142 if (ata_id_has_lba48(dev->id)) {
1143 dev->flags |= ATA_DFLAG_LBA48;
1144 dev->n_sectors = ata_id_u64(dev->id, 100);
1146 dev->n_sectors = ata_id_u32(dev->id, 60);
1149 ap->host->max_cmd_len = 16;
1151 /* print device info to dmesg */
1152 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1154 ata_mode_string(xfer_modes),
1155 (unsigned long long)dev->n_sectors,
1156 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1159 /* ATAPI-specific feature tests */
1161 if (ata_id_is_ata(dev->id)) /* sanity check */
1164 rc = atapi_cdb_len(dev->id);
1165 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1166 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1169 ap->cdb_len = (unsigned int) rc;
1170 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1172 /* print device info to dmesg */
1173 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1175 ata_mode_string(xfer_modes));
1178 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1182 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1185 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1186 DPRINTK("EXIT, err\n");
1190 * ata_bus_probe - Reset and probe ATA bus
1196 * Zero on success, non-zero on error.
1199 static int ata_bus_probe(struct ata_port *ap)
1201 unsigned int i, found = 0;
1203 ap->ops->phy_reset(ap);
1204 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1207 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1208 ata_dev_identify(ap, i);
1209 if (ata_dev_present(&ap->device[i])) {
1211 if (ap->ops->dev_config)
1212 ap->ops->dev_config(ap, &ap->device[i]);
1216 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1217 goto err_out_disable;
1220 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1221 goto err_out_disable;
1226 ap->ops->port_disable(ap);
1238 void ata_port_probe(struct ata_port *ap)
1240 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1244 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1245 * @ap: SATA port associated with target SATA PHY.
1247 * This function issues commands to standard SATA Sxxx
1248 * PHY registers, to wake up the phy (and device), and
1249 * clear any reset condition.
1251 * LOCKING: None. Serialized during ata_bus_probe().
1254 void __sata_phy_reset(struct ata_port *ap)
1257 unsigned long timeout = jiffies + (HZ * 5);
1259 if (ap->flags & ATA_FLAG_SATA_RESET) {
1260 /* issue phy wake/reset */
1261 scr_write_flush(ap, SCR_CONTROL, 0x301);
1262 udelay(400); /* FIXME: a guess */
1264 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1266 /* wait for phy to become ready, if necessary */
1269 sstatus = scr_read(ap, SCR_STATUS);
1270 if ((sstatus & 0xf) != 1)
1272 } while (time_before(jiffies, timeout));
1274 /* TODO: phy layer with polling, timeouts, etc. */
1275 if (sata_dev_present(ap))
1278 sstatus = scr_read(ap, SCR_STATUS);
1279 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1281 ata_port_disable(ap);
1284 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1287 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1288 ata_port_disable(ap);
1292 ap->cbl = ATA_CBL_SATA;
1296 * sata_phy_reset - Reset SATA bus.
1297 * @ap: SATA port associated with target SATA PHY.
1299 * This function resets the SATA bus, and then probes
1300 * the bus for devices.
1302 * LOCKING: None. Serialized during ata_bus_probe().
1305 void sata_phy_reset(struct ata_port *ap)
1307 __sata_phy_reset(ap);
1308 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1314 * ata_port_disable - Disable port.
1315 * @ap: Port to be disabled.
1317 * Modify @ap data structure such that the system
1318 * thinks that the entire port is disabled, and should
1319 * never attempt to probe or communicate with devices
1322 * LOCKING: host_set lock, or some other form of
1326 void ata_port_disable(struct ata_port *ap)
1328 ap->device[0].class = ATA_DEV_NONE;
1329 ap->device[1].class = ATA_DEV_NONE;
1330 ap->flags |= ATA_FLAG_PORT_DISABLED;
1336 } xfer_mode_classes[] = {
1337 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1338 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1339 { ATA_SHIFT_PIO, XFER_PIO_0 },
1342 static inline u8 base_from_shift(unsigned int shift)
1346 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1347 if (xfer_mode_classes[i].shift == shift)
1348 return xfer_mode_classes[i].base;
1353 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1358 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1361 if (dev->xfer_shift == ATA_SHIFT_PIO)
1362 dev->flags |= ATA_DFLAG_PIO;
1364 ata_dev_set_xfermode(ap, dev);
1366 base = base_from_shift(dev->xfer_shift);
1367 ofs = dev->xfer_mode - base;
1368 idx = ofs + dev->xfer_shift;
1369 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1371 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1372 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1374 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1375 ap->id, dev->devno, xfer_mode_str[idx]);
1378 static int ata_host_set_pio(struct ata_port *ap)
1384 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1387 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1391 base = base_from_shift(ATA_SHIFT_PIO);
1392 xfer_mode = base + x;
1394 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1395 (int)base, (int)xfer_mode, mask, x);
1397 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1398 struct ata_device *dev = &ap->device[i];
1399 if (ata_dev_present(dev)) {
1400 dev->pio_mode = xfer_mode;
1401 dev->xfer_mode = xfer_mode;
1402 dev->xfer_shift = ATA_SHIFT_PIO;
1403 if (ap->ops->set_piomode)
1404 ap->ops->set_piomode(ap, dev);
1411 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1412 unsigned int xfer_shift)
1416 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1417 struct ata_device *dev = &ap->device[i];
1418 if (ata_dev_present(dev)) {
1419 dev->dma_mode = xfer_mode;
1420 dev->xfer_mode = xfer_mode;
1421 dev->xfer_shift = xfer_shift;
1422 if (ap->ops->set_dmamode)
1423 ap->ops->set_dmamode(ap, dev);
/*
 * NOTE(review): the body below is elided (original line numbers jump);
 * the error-handling gotos that reach the ata_port_disable() call at the
 * end are not visible — confirm against the full file.
 */
1429 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1430 * @ap: port on which timings will be programmed
1432 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1434 * LOCKING: None. Serialized during ata_bus_probe().
1437 static void ata_set_mode(struct ata_port *ap)
1439 unsigned int i, xfer_shift;
1443 /* step 1: always set host PIO timings */
1444 rc = ata_host_set_pio(ap);
1448 /* step 2: choose the best data xfer mode */
1449 xfer_mode = xfer_shift = 0;
1450 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1454 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1455 if (xfer_shift != ATA_SHIFT_PIO)
1456 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1458 /* step 4: update devices' xfer mode */
1459 ata_dev_set_mode(ap, &ap->device[0]);
1460 ata_dev_set_mode(ap, &ap->device[1]);
/* a failed SET FEATURES may have disabled the port meanwhile */
1462 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* give the low-level driver a chance to finalize mode selection */
1465 if (ap->ops->post_set_mode)
1466 ap->ops->post_set_mode(ap);
/* refresh cached protocol selection for both devices */
1468 for (i = 0; i < 2; i++) {
1469 struct ata_device *dev = &ap->device[i];
1470 ata_dev_set_protocol(dev);
/* error path (reached via elided goto): take the whole port offline */
1476 ata_port_disable(ap);
1480 * ata_busy_sleep - sleep until BSY clears, or timeout
1481 * @ap: port containing status register to be polled
1482 * @tmout_pat: impatience timeout
1483 * @tmout: overall timeout
1485 * Sleep until ATA Status register bit BSY clears,
1486 * or a timeout occurs.
1492 static unsigned int ata_busy_sleep (struct ata_port *ap,
1493 unsigned long tmout_pat,
1494 unsigned long tmout)
1496 unsigned long timer_start, timeout;
/* quick initial poll before starting timed loops */
1499 status = ata_busy_wait(ap, ATA_BUSY, 300);
1500 timer_start = jiffies;
1501 timeout = timer_start + tmout_pat;
/* phase 1: poll until the "impatience" deadline expires */
1502 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1504 status = ata_busy_wait(ap, ATA_BUSY, 3);
/* still busy after tmout_pat: warn the user, keep waiting */
1507 if (status & ATA_BUSY)
1508 printk(KERN_WARNING "ata%u is slow to respond, "
1509 "please be patient\n", ap->id);
/* phase 2: poll until the hard overall deadline */
1511 timeout = timer_start + tmout;
1512 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1514 status = ata_chk_status(ap);
/* hard timeout: report failure (elided code presumably returns error) */
1517 if (status & ATA_BUSY) {
1518 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1519 ap->id, tmout / HZ);
/*
 * ata_bus_post_reset - wait for devices to become ready after a bus reset.
 * @devmask: bit 0 / bit 1 set if device 0 / device 1 was detected earlier.
 * Waits for BSY to clear on device 0, then (for device 1) polls the
 * nsect/lbal signature registers until they read back 1/1, then issues
 * the "paranoid" double device-select sequence.
 * NOTE(review): elided source — the dev0/dev1 guard conditionals and
 * loop braces around the polling code are not visible here.
 */
1526 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1528 struct ata_ioports *ioaddr = &ap->ioaddr;
1529 unsigned int dev0 = devmask & (1 << 0);
1530 unsigned int dev1 = devmask & (1 << 1);
1531 unsigned long timeout;
1533 /* if device 0 was found in ata_devchk, wait for its
1537 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1539 /* if device 1 was found in ata_devchk, wait for
1540 * register access, then wait for BSY to clear
1542 timeout = jiffies + ATA_TMOUT_BOOT;
1546 ap->ops->dev_select(ap, 1);
1547 if (ap->flags & ATA_FLAG_MMIO) {
1548 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1549 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1551 nsect = inb(ioaddr->nsect_addr);
1552 lbal = inb(ioaddr->lbal_addr);
/* 1/1 in nsect/lbal is the post-reset signature: registers accessible */
1554 if ((nsect == 1) && (lbal == 1))
1556 if (time_after(jiffies, timeout)) {
1560 msleep(50); /* give drive a breather */
1563 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1565 /* is all this really necessary? */
1566 ap->ops->dev_select(ap, 0);
1568 ap->ops->dev_select(ap, 1);
1570 ap->ops->dev_select(ap, 0);
/*
 * ata_bus_edd - reset the bus by issuing EXECUTE DEVICE DIAGNOSTIC.
 * Builds a no-data taskfile for ATA_CMD_EDD, writes it to the host,
 * then sleeps until BSY clears (boot-length timeouts).
 * Returns the ata_busy_sleep() status.
 */
1577 * LOCKING: None. Serialized during ata_bus_probe().
1581 static unsigned int ata_bus_edd(struct ata_port *ap)
1583 struct ata_taskfile tf;
1585 /* set up execute-device-diag (bus reset) taskfile */
1586 /* also, take interrupts to a known state (disabled) */
1587 DPRINTK("execute-device-diag\n");
1588 ata_tf_init(ap, &tf, 0);
1590 tf.command = ATA_CMD_EDD;
1591 tf.protocol = ATA_PROT_NODATA;
1594 ata_tf_to_host(ap, &tf);
1596 /* spec says at least 2ms. but who knows with those
1597 * crazy ATAPI devices...
1601 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/*
 * ata_bus_softreset - reset the bus via the SRST bit in device control.
 * Pulses ATA_SRST (set, then clear) through the control register, via
 * MMIO or port I/O depending on ATA_FLAG_MMIO, waits the magic 150ms,
 * then hands off to ata_bus_post_reset() to wait for the devices.
 * NOTE(review): elided source — the msleep(150) and return are not
 * visible between lines 1632 and 1636.
 */
1604 static unsigned int ata_bus_softreset(struct ata_port *ap,
1605 unsigned int devmask)
1607 struct ata_ioports *ioaddr = &ap->ioaddr;
1609 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1611 /* software reset. causes dev0 to be selected */
1612 if (ap->flags & ATA_FLAG_MMIO) {
1613 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1614 udelay(20); /* FIXME: flush */
1615 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1616 udelay(20); /* FIXME: flush */
1617 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
/* same SRST pulse for legacy port I/O controllers */
1619 outb(ap->ctl, ioaddr->ctl_addr);
1621 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1623 outb(ap->ctl, ioaddr->ctl_addr);
1626 /* spec mandates ">= 2ms" before checking status.
1627 * We wait 150ms, because that was the magic delay used for
1628 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1629 * between when the ATA command register is written, and then
1630 * status is checked. Because waiting for "a while" before
1631 * checking status is fine, post SRST, we perform this magic
1632 * delay here as well.
1636 ata_bus_post_reset(ap, devmask);
/*
 * NOTE(review): elided source throughout this function (original line
 * numbers jump) — SATA-reset handling, the error goto label, and several
 * conditionals' bodies are not visible; comments below describe only
 * what the visible lines establish.
 */
1642 * ata_bus_reset - reset host port and associated ATA channel
1643 * @ap: port to reset
1645 * This is typically the first time we actually start issuing
1646 * commands to the ATA channel. We wait for BSY to clear, then
1647 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1648 * result. Determine what devices, if any, are on the channel
1649 * by looking at the device 0/1 error register. Look at the signature
1650 * stored in each device's taskfile registers, to determine if
1651 * the device is ATA or ATAPI.
1654 * Inherited from caller. Some functions called by this function
1655 * obtain the host_set lock.
1658 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1661 void ata_bus_reset(struct ata_port *ap)
1663 struct ata_ioports *ioaddr = &ap->ioaddr;
1664 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1666 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1668 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1670 /* determine if device 0/1 are present */
1671 if (ap->flags & ATA_FLAG_SATA_RESET)
1674 dev0 = ata_devchk(ap, 0);
1676 dev1 = ata_devchk(ap, 1);
1680 devmask |= (1 << 0);
1682 devmask |= (1 << 1);
1684 /* select device 0 again */
1685 ap->ops->dev_select(ap, 0);
1687 /* issue bus reset */
1688 if (ap->flags & ATA_FLAG_SRST)
1689 rc = ata_bus_softreset(ap, devmask);
1690 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1691 /* set up device control */
1692 if (ap->flags & ATA_FLAG_MMIO)
1693 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1695 outb(ap->ctl, ioaddr->ctl_addr);
1696 rc = ata_bus_edd(ap);
1703 * determine by signature whether we have ATA or ATAPI devices
1705 err = ata_dev_try_classify(ap, 0);
/* 0x81: dev0 failed diagnostics AND dev1 failed — skip classifying dev1 */
1706 if ((slave_possible) && (err != 0x81))
1707 ata_dev_try_classify(ap, 1);
1709 /* re-enable interrupts */
1710 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1713 /* is double-select really necessary? */
1714 if (ap->device[1].class != ATA_DEV_NONE)
1715 ap->ops->dev_select(ap, 1);
1716 if (ap->device[0].class != ATA_DEV_NONE)
1717 ap->ops->dev_select(ap, 0);
1719 /* if no devices were detected, disable this port */
1720 if ((ap->device[0].class == ATA_DEV_NONE) &&
1721 (ap->device[1].class == ATA_DEV_NONE))
1724 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1725 /* set up device control for ATA_FLAG_SATA_RESET */
1726 if (ap->flags & ATA_FLAG_MMIO)
1727 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1729 outb(ap->ctl, ioaddr->ctl_addr);
/* error path (reached via elided goto label): disable the port */
1736 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1737 ap->ops->port_disable(ap);
/* ata_pr_blacklisted - log that DMA is being disabled for a blacklisted
 * device (see ata_dma_blacklist below). */
1742 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1744 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1745 ap->id, dev->devno);
/* Model strings (as reported by IDENTIFY) of devices with broken DMA.
 * Matched by prefix in ata_dma_blacklisted(). NOTE(review): list is
 * elided here — more entries exist in the full file. */
1748 static const char * ata_dma_blacklist [] = {
1767 "Toshiba CD-ROM XM-6202B",
1769 "E-IDE CD-ROM CR-840",
1772 "SAMSUNG CD-ROM SC-148C",
1773 "SAMSUNG CD-ROM SC",
1775 "SAMSUNG CD-ROM SN-124",
1776 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
/*
 * ata_dma_blacklisted - check whether @dev's IDENTIFY model string is on
 * the DMA blacklist. Extracts the product string, strips trailing
 * blank-fill, then compares against each blacklist entry.
 * Returns nonzero on a match (elided return statements not visible).
 */
1780 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1782 unsigned char model_num[40];
/* pull the product string out of the IDENTIFY data */
1787 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1790 len = strnlen(s, sizeof(model_num));
1792 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1793 while ((len > 0) && (s[len - 1] == ' ')) {
1798 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
1799 if (!strncmp(ata_dma_blacklist[i], s, len))
/*
 * ata_get_mode_mask - compute the usable transfer-mode bitmask for one
 * mode class (@shift = ATA_SHIFT_UDMA / MWDMA / PIO).
 * Starts from the host's capability mask and ANDs in each present
 * device's IDENTIFY capability bits; blacklisted devices have their
 * DMA contribution zeroed (elided assignments not visible here).
 */
1805 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1807 struct ata_device *master, *slave;
1810 master = &ap->device[0];
1811 slave = &ap->device[1];
1813 assert (ata_dev_present(master) || ata_dev_present(slave));
1815 if (shift == ATA_SHIFT_UDMA) {
1816 mask = ap->udma_mask;
1817 if (ata_dev_present(master)) {
/* low byte of word 88: UDMA modes the device supports */
1818 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1819 if (ata_dma_blacklisted(ap, master)) {
1821 ata_pr_blacklisted(ap, master);
1824 if (ata_dev_present(slave)) {
1825 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1826 if (ata_dma_blacklisted(ap, slave)) {
1828 ata_pr_blacklisted(ap, slave);
1832 else if (shift == ATA_SHIFT_MWDMA) {
1833 mask = ap->mwdma_mask;
1834 if (ata_dev_present(master)) {
/* low 3 bits of word 63: MWDMA modes supported */
1835 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
1836 if (ata_dma_blacklisted(ap, master)) {
1838 ata_pr_blacklisted(ap, master);
1841 if (ata_dev_present(slave)) {
1842 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
1843 if (ata_dma_blacklisted(ap, slave)) {
1845 ata_pr_blacklisted(ap, slave);
1849 else if (shift == ATA_SHIFT_PIO) {
1850 mask = ap->pio_mask;
1851 if (ata_dev_present(master)) {
1852 /* spec doesn't return explicit support for
1853 * PIO0-2, so we fake it
1855 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
1860 if (ata_dev_present(slave)) {
1861 /* spec doesn't return explicit support for
1862 * PIO0-2, so we fake it
1864 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unknown shift value: should not happen */
1871 mask = 0xffffffff; /* shut up compiler warning */
/* fgb - return the index of the highest set bit in @bitmap (per the
 * original comment); the loop scans all 32 bits — the per-hit action and
 * return are elided from this view, so presumably the last matching index
 * wins (TODO confirm against full file). */
1878 /* find greatest bit */
1879 static int fgb(u32 bitmap)
1884 for (i = 0; i < 32; i++)
1885 if (bitmap & (1 << i))
1892 * ata_choose_xfer_mode - attempt to find best transfer mode
1893 * @ap: Port for which an xfer mode will be selected
1894 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
1895 * @xfer_shift_out: (output) bit shift that selects this mode
1900 * Zero on success, negative on error.
1903 static int ata_choose_xfer_mode(struct ata_port *ap,
1905 unsigned int *xfer_shift_out)
1907 unsigned int mask, shift;
/* walk mode classes (best first per xfer_mode_classes ordering) and
 * take the first class with a non-empty capability mask */
1910 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
1911 shift = xfer_mode_classes[i].shift;
1912 mask = ata_get_mode_mask(ap, shift);
/* best bit 'x' (from elided fgb call) + class base = xfer-mode code */
1916 *xfer_mode_out = xfer_mode_classes[i].base + x;
1917 *xfer_shift_out = shift;
1926 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1927 * @ap: Port associated with device @dev
1928 * @dev: Device to which command will be sent
1930 * Issue SET FEATURES - XFER MODE command to device @dev
1933 * LOCKING: None. Serialized during ata_bus_probe().
1936 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
1938 DECLARE_COMPLETION(wait);
1939 struct ata_queued_cmd *qc;
1941 unsigned long flags;
1943 /* set up set-features taskfile */
1944 DPRINTK("set features - xfer mode\n");
1946 qc = ata_qc_new_init(ap, dev);
1949 qc->tf.command = ATA_CMD_SET_FEATURES;
1950 qc->tf.feature = SETFEATURES_XFER;
1951 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1952 qc->tf.protocol = ATA_PROT_NODATA;
/* sector count register carries the xfer-mode code for this subcommand */
1953 qc->tf.nsect = dev->xfer_mode;
/* synchronous issue: noop completion fn + wait on the completion */
1955 qc->waiting = &wait;
1956 qc->complete_fn = ata_qc_complete_noop;
1958 spin_lock_irqsave(&ap->host_set->lock, flags);
1959 rc = ata_qc_issue(qc);
1960 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure (elided condition): take the port offline */
1963 ata_port_disable(ap);
1965 wait_for_completion(&wait);
/*
 * ata_sg_clean - undo the DMA mapping created for @qc.
 * Unmaps via dma_unmap_sg() for multi-element S/G commands or
 * dma_unmap_single() for single-buffer commands, then clears
 * ATA_QCFLAG_DMAMAP.
 */
1977 static void ata_sg_clean(struct ata_queued_cmd *qc)
1979 struct ata_port *ap = qc->ap;
1980 struct scatterlist *sg = qc->sg;
1981 int dir = qc->dma_dir;
1983 assert(qc->flags & ATA_QCFLAG_DMAMAP);
1986 if (qc->flags & ATA_QCFLAG_SINGLE)
1987 assert(qc->n_elem == 1);
1989 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
1991 if (qc->flags & ATA_QCFLAG_SG)
1992 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
1994 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
1995 sg_dma_len(&sg[0]), dir);
1997 qc->flags &= ~ATA_QCFLAG_DMAMAP;
/*
 * NOTE(review): elided source — the inner loop that splits one S/G
 * element into multiple PRD entries (advancing addr/sg_len/idx) is only
 * partially visible between lines 2034 and 2050.
 */
2002 * ata_fill_sg - Fill PCI IDE PRD table
2003 * @qc: Metadata associated with taskfile to be transferred
2005 * Fill PCI IDE PRD (scatter-gather) table with segments
2006 * associated with the current disk command.
2009 * spin_lock_irqsave(host_set lock)
2012 static void ata_fill_sg(struct ata_queued_cmd *qc)
2014 struct scatterlist *sg = qc->sg;
2015 struct ata_port *ap = qc->ap;
2016 unsigned int idx, nelem;
2019 assert(qc->n_elem > 0);
2022 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2026 /* determine if physical DMA addr spans 64K boundary.
2027 * Note h/w doesn't support 64-bit, so we unconditionally
2028 * truncate dma_addr_t to u32.
2030 addr = (u32) sg_dma_address(sg);
2031 sg_len = sg_dma_len(sg);
2034 offset = addr & 0xffff;
/* clamp each PRD entry so it never crosses a 64K boundary */
2036 if ((offset + sg_len) > 0x10000)
2037 len = 0x10000 - offset;
2039 ap->prd[idx].addr = cpu_to_le32(addr);
/* len 0x0000 in a PRD means 64K, hence the & 0xffff */
2040 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2041 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* mark the final PRD entry as end-of-table */
2050 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2053 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2054 * @qc: Metadata associated with taskfile to check
2056 * Allow low-level driver to filter ATA PACKET commands, returning
2057 * a status indicating whether or not it is OK to use DMA for the
2058 * supplied PACKET command.
2061 * RETURNS: 0 when ATAPI DMA can be used
2064 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2066 struct ata_port *ap = qc->ap;
2067 int rc = 0; /* Assume ATAPI DMA is OK by default */
/* defer to the low-level driver's filter hook when present */
2069 if (ap->ops->check_atapi_dma)
2070 rc = ap->ops->check_atapi_dma(qc);
2075 * ata_qc_prep - Prepare taskfile for submission
2076 * @qc: Metadata associated with taskfile to be prepared
2078 * Prepare ATA taskfile for submission.
2081 * spin_lock_irqsave(host_set lock)
2083 void ata_qc_prep(struct ata_queued_cmd *qc)
/* nothing to do for non-DMA commands; DMA path (elided) fills the PRD */
2085 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/*
 * ata_sg_init_one - attach a single flat buffer to @qc as a one-element
 * scatterlist (qc->sgent), marking the command ATA_QCFLAG_SINGLE.
 */
2091 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2093 struct scatterlist *sg;
2095 qc->flags |= ATA_QCFLAG_SINGLE;
2097 memset(&qc->sgent, 0, sizeof(qc->sgent));
2098 qc->sg = &qc->sgent;
/* describe the buffer as page + in-page offset for the DMA API */
2103 sg->page = virt_to_page(buf);
2104 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2105 sg->length = buflen;
/*
 * ata_sg_init - attach a caller-provided scatterlist of @n_elem entries
 * to @qc, marking the command ATA_QCFLAG_SG.
 */
2108 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2109 unsigned int n_elem)
2111 qc->flags |= ATA_QCFLAG_SG;
2113 qc->n_elem = n_elem;
2117 * ata_sg_setup_one -
2121 * spin_lock_irqsave(host_set lock)
/* DMA-map the single flat buffer of an ATA_QCFLAG_SINGLE command and
 * record the mapping in its one-element scatterlist. Returns nonzero on
 * mapping failure (return statements elided from this view). */
2127 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2129 struct ata_port *ap = qc->ap;
2130 int dir = qc->dma_dir;
2131 struct scatterlist *sg = qc->sg;
2132 dma_addr_t dma_address;
2134 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2136 if (dma_mapping_error(dma_address))
2139 sg_dma_address(sg) = dma_address;
2140 sg_dma_len(sg) = sg->length;
2142 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2143 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2153 * spin_lock_irqsave(host_set lock)
/* DMA-map the scatterlist of an ATA_QCFLAG_SG command; n_elem may shrink
 * if the IOMMU coalesces entries. */
2159 static int ata_sg_setup(struct ata_queued_cmd *qc)
2161 struct ata_port *ap = qc->ap;
2162 struct scatterlist *sg = qc->sg;
2165 VPRINTK("ENTER, ata%u\n", ap->id);
2166 assert(qc->flags & ATA_QCFLAG_SG);
2169 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2173 DPRINTK("%d sg elements mapped\n", n_elem);
2175 qc->n_elem = n_elem;
/*
 * ata_pio_poll - one polling step of the PIO state machine.
 * Checks BSY; while busy, stays in the *_POLL state (returning a pause
 * interval) until pio_task_timeout expires, then moves to PIO_ST_TMOUT.
 * When BSY clears, transitions back to the corresponding non-poll state.
 */
2190 static unsigned long ata_pio_poll(struct ata_port *ap)
2193 unsigned int poll_state = PIO_ST_UNKNOWN;
2194 unsigned int reg_state = PIO_ST_UNKNOWN;
2195 const unsigned int tmout_state = PIO_ST_TMOUT;
/* map current state to its (poll state, resume state) pair */
2197 switch (ap->pio_task_state) {
2200 poll_state = PIO_ST_POLL;
2204 case PIO_ST_LAST_POLL:
2205 poll_state = PIO_ST_LAST_POLL;
2206 reg_state = PIO_ST_LAST;
2213 status = ata_chk_status(ap);
2214 if (status & ATA_BUSY) {
2215 if (time_after(jiffies, ap->pio_task_timeout)) {
2216 ap->pio_task_state = tmout_state;
2219 ap->pio_task_state = poll_state;
/* still busy but not timed out: re-queue after a short pause */
2220 return ATA_SHORT_PAUSE;
/* BSY cleared: resume normal processing */
2223 ap->pio_task_state = reg_state;
2228 * ata_pio_complete -
/* Final step of a PIO command: wait briefly for BSY|DRQ to clear,
 * falling back to the LAST_POLL state if the drive is slow, then check
 * the final status and complete the queued command. */
2234 static void ata_pio_complete (struct ata_port *ap)
2236 struct ata_queued_cmd *qc;
2240 * This is purely hueristic. This is a fast path.
2241 * Sometimes when we enter, BSY will be cleared in
2242 * a chk-status or two. If not, the drive is probably seeking
2243 * or something. Snooze for a couple msecs, then
2244 * chk-status again. If still busy, fall back to
2245 * PIO_ST_POLL state.
2247 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2248 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2250 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2251 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
/* still not ready: hand off to the polling path with a deadline */
2252 ap->pio_task_state = PIO_ST_LAST_POLL;
2253 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2258 drv_stat = ata_wait_idle(ap);
/* bad final status: flag error state (elided return follows) */
2259 if (!ata_ok(drv_stat)) {
2260 ap->pio_task_state = PIO_ST_ERR;
2264 qc = ata_qc_from_tag(ap, ap->active_tag);
2267 ap->pio_task_state = PIO_ST_IDLE;
2271 ata_qc_complete(qc, drv_stat);
/* swap_buf_le16 - byte-swap @buf_words 16-bit words in place.
 * Compiled only on big-endian builds (the matching #ifdef __BIG_ENDIAN
 * line is elided); on little-endian kernels this is a no-op body. */
2274 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2279 for (i = 0; i < buf_words; i++)
2280 buf[i] = le16_to_cpu(buf[i]);
2281 #endif /* __BIG_ENDIAN */
/*
 * ata_mmio_data_xfer - transfer @buflen bytes of PIO data through the
 * memory-mapped data register, one 16-bit word at a time.
 * @write_data: nonzero to write to the device, zero to read.
 */
2284 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2285 unsigned int buflen, int write_data)
2288 unsigned int words = buflen >> 1;
2289 u16 *buf16 = (u16 *) buf;
2290 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
/* data register is little-endian on the wire; convert per word */
2293 for (i = 0; i < words; i++)
2294 writew(le16_to_cpu(buf16[i]), mmio);
2296 for (i = 0; i < words; i++)
2297 buf16[i] = cpu_to_le16(readw(mmio));
/*
 * ata_pio_data_xfer - same transfer via legacy port I/O (insw/outsw).
 * NOTE(review): the local is named "dwords" but buflen >> 1 is a count
 * of 16-bit words, which is what insw/outsw take.
 */
2301 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2302 unsigned int buflen, int write_data)
2304 unsigned int dwords = buflen >> 1;
2307 outsw(ap->ioaddr.data_addr, buf, dwords);
2309 insw(ap->ioaddr.data_addr, buf, dwords);
/* ata_data_xfer - dispatch to the MMIO or port-I/O variant based on
 * ATA_FLAG_MMIO. */
2312 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2313 unsigned int buflen, int do_write)
2315 if (ap->flags & ATA_FLAG_MMIO)
2316 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2318 ata_pio_data_xfer(ap, buf, buflen, do_write);
/*
 * ata_pio_sector - transfer exactly one sector (ATA_SECT_SIZE bytes) of
 * PIO data between the device and the current scatterlist position.
 * Advances cursect/cursg/cursg_ofs bookkeeping (partially elided) and
 * flips the state machine to PIO_ST_LAST on the final sector.
 * NOTE(review): the kunmap() matching the kmap() below is elided.
 */
2321 static void ata_pio_sector(struct ata_queued_cmd *qc)
2323 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2324 struct scatterlist *sg = qc->sg;
2325 struct ata_port *ap = qc->ap;
2327 unsigned int offset;
/* last sector of the command: next state is completion */
2330 if (qc->cursect == (qc->nsect - 1))
2331 ap->pio_task_state = PIO_ST_LAST;
2333 page = sg[qc->cursg].page;
2334 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2336 /* get the current page and offset */
2337 page = nth_page(page, (offset >> PAGE_SHIFT));
2338 offset %= PAGE_SIZE;
2340 buf = kmap(page) + offset;
/* current S/G element exhausted: advance to the next one (elided) */
2345 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2350 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2352 /* do the actual data transfer */
2353 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2354 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/*
 * __atapi_pio_bytes - transfer up to @bytes of ATAPI PIO data at the
 * current scatterlist position, clamped to the S/G element and to the
 * page boundary. Updates curbytes/cursg_ofs and advances cursg when an
 * element is consumed; loops (via elided goto) while bytes remain.
 * NOTE(review): the kunmap() matching the kmap() below is elided.
 */
2359 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2361 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2362 struct scatterlist *sg = qc->sg;
2363 struct ata_port *ap = qc->ap;
2366 unsigned int offset, count;
/* this chunk finishes the command: next state is completion */
2368 if (qc->curbytes == qc->nbytes - bytes)
2369 ap->pio_task_state = PIO_ST_LAST;
2372 sg = &qc->sg[qc->cursg];
2376 offset = sg->offset + qc->cursg_ofs;
2378 /* get the current page and offset */
2379 page = nth_page(page, (offset >> PAGE_SHIFT));
2380 offset %= PAGE_SIZE;
/* clamp to what's left in this S/G element... */
2382 count = min(sg->length - qc->cursg_ofs, bytes);
2384 /* don't cross page boundaries */
2385 count = min(count, (unsigned int)PAGE_SIZE - offset);
2387 buf = kmap(page) + offset;
2390 qc->curbytes += count;
2391 qc->cursg_ofs += count;
/* element fully consumed: advance cursg (elided) */
2393 if (qc->cursg_ofs == sg->length) {
2398 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2400 /* do the actual data transfer */
2401 ata_data_xfer(ap, buf, count, do_write);
/* more bytes requested than this pass moved: loop again (elided goto) */
2406 if (qc->cursg_ofs < sg->length)
/*
 * atapi_pio_bytes - handle one ATAPI PIO data-transfer interrupt/step.
 * Reads the shadow taskfile to get the interrupt reason and the
 * device-announced byte count, validates transfer direction, then moves
 * that many bytes via __atapi_pio_bytes(). Protocol violations fall
 * through (via elided goto) to the error label at the bottom.
 */
2412 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2414 struct ata_port *ap = qc->ap;
2415 struct ata_device *dev = qc->dev;
2416 unsigned int ireason, bc_lo, bc_hi, bytes;
2417 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2419 ap->ops->tf_read(ap, &qc->tf);
/* interrupt reason in nsect; byte count split across lbam/lbah */
2420 ireason = qc->tf.nsect;
2421 bc_lo = qc->tf.lbam;
2422 bc_hi = qc->tf.lbah;
2423 bytes = (bc_hi << 8) | bc_lo;
2425 /* shall be cleared to zero, indicating xfer of data */
2426 if (ireason & (1 << 0))
2429 /* make sure transfer direction matches expected */
2430 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2431 if (do_write != i_write)
2434 __atapi_pio_bytes(qc, bytes);
/* error label (reached via elided gotos above) */
2439 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2440 ap->id, dev->devno);
2441 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_block - transfer the next block of a PIO command.
 * Waits briefly for BSY to clear (deferring to the poll state if the
 * drive is slow), then dispatches: ATAPI commands go through
 * atapi_pio_bytes(), plain ATA through ata_pio_sector() (elided call).
 */
2451 static void ata_pio_block(struct ata_port *ap)
2453 struct ata_queued_cmd *qc;
2457 * This is purely hueristic. This is a fast path.
2458 * Sometimes when we enter, BSY will be cleared in
2459 * a chk-status or two. If not, the drive is probably seeking
2460 * or something. Snooze for a couple msecs, then
2461 * chk-status again. If still busy, fall back to
2462 * PIO_ST_POLL state.
2464 status = ata_busy_wait(ap, ATA_BUSY, 5);
2465 if (status & ATA_BUSY) {
2467 status = ata_busy_wait(ap, ATA_BUSY, 10);
2468 if (status & ATA_BUSY) {
/* drive still busy: switch to timed polling and bail out */
2469 ap->pio_task_state = PIO_ST_POLL;
2470 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2475 qc = ata_qc_from_tag(ap, ap->active_tag);
2478 if (is_atapi_taskfile(&qc->tf)) {
2479 /* no more data to transfer or unsupported ATAPI command */
2480 if ((status & ATA_DRQ) == 0) {
2481 ap->pio_task_state = PIO_ST_IDLE;
2485 ata_qc_complete(qc, status);
2489 atapi_pio_bytes(qc);
2491 /* handle BSY=0, DRQ=0 as error */
2492 if ((status & ATA_DRQ) == 0) {
2493 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_error - terminal error state of the PIO state machine.
 * Logs the drive status, returns the machine to idle, and completes the
 * active command with ATA_ERR forced into the status.
 */
2501 static void ata_pio_error(struct ata_port *ap)
2503 struct ata_queued_cmd *qc;
2506 qc = ata_qc_from_tag(ap, ap->active_tag);
2509 drv_stat = ata_chk_status(ap);
2510 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2513 ap->pio_task_state = PIO_ST_IDLE;
/* force the error bit so the completion path sees a failure */
2517 ata_qc_complete(qc, drv_stat | ATA_ERR);
/*
 * ata_pio_task - workqueue entry point driving the PIO state machine.
 * Runs one step for the current pio_task_state, then re-queues itself
 * (delayed when the step returned a pause interval, immediately
 * otherwise) until the machine reaches an idle/terminal state.
 * NOTE(review): most switch cases (PIO_ST, PIO_ST_TMOUT/ERR, etc.) are
 * elided from this view.
 */
2520 static void ata_pio_task(void *_data)
2522 struct ata_port *ap = _data;
2523 unsigned long timeout = 0;
2525 switch (ap->pio_task_state) {
2534 ata_pio_complete(ap);
2538 case PIO_ST_LAST_POLL:
2539 timeout = ata_pio_poll(ap);
/* nonzero timeout => snooze; zero => run the next step right away */
2549 queue_delayed_work(ata_wq, &ap->pio_task,
2552 queue_work(ata_wq, &ap->pio_task);
/*
 * atapi_request_sense - synchronously issue an ATAPI REQUEST SENSE to
 * @dev, filling @cmd->sense_buffer. Builds a PACKET command carrying a
 * REQUEST SENSE CDB, issues it under the host_set lock, and waits on a
 * completion (same synchronous pattern as ata_dev_set_xfermode).
 */
2555 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2556 struct scsi_cmnd *cmd)
2558 DECLARE_COMPLETION(wait);
2559 struct ata_queued_cmd *qc;
2560 unsigned long flags;
2563 DPRINTK("ATAPI request sense\n");
2565 qc = ata_qc_new_init(ap, dev);
2568 /* FIXME: is this needed? */
2569 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
/* sense data lands directly in the SCSI midlayer's sense buffer */
2571 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2572 qc->dma_dir = DMA_FROM_DEVICE;
2574 memset(&qc->cdb, 0, ap->cdb_len);
2575 qc->cdb[0] = REQUEST_SENSE;
2576 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2578 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2579 qc->tf.command = ATA_CMD_PACKET;
/* lbam/lbah = ATAPI byte-count limit advertised to the device */
2581 qc->tf.protocol = ATA_PROT_ATAPI;
2582 qc->tf.lbam = (8 * 1024) & 0xff;
2583 qc->tf.lbah = (8 * 1024) >> 8;
2584 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2586 qc->waiting = &wait;
2587 qc->complete_fn = ata_qc_complete_noop;
2589 spin_lock_irqsave(&ap->host_set->lock, flags);
2590 rc = ata_qc_issue(qc);
2591 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure (elided condition): take the port offline */
2594 ata_port_disable(ap);
2596 wait_for_completion(&wait);
/*
 * NOTE(review): elided source — several switch cases, the unlock/exit
 * path, and the DMA-status handling between the visible lines are not
 * shown here.
 */
2602 * ata_qc_timeout - Handle timeout of queued command
2603 * @qc: Command that timed out
2605 * Some part of the kernel (currently, only the SCSI layer)
2606 * has noticed that the active command on port @ap has not
2607 * completed after a specified length of time. Handle this
2608 * condition by disabling DMA (if necessary) and completing
2609 * transactions, with error if necessary.
2611 * This also handles the case of the "lost interrupt", where
2612 * for some reason (possibly hardware bug, possibly driver bug)
2613 * an interrupt was not delivered to the driver, even though the
2614 * transaction completed successfully.
2619 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2621 struct ata_port *ap = qc->ap;
2622 struct ata_device *dev = qc->dev;
2623 u8 host_stat = 0, drv_stat;
2627 /* FIXME: doesn't this conflict with timeout handling? */
2628 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2629 struct scsi_cmnd *cmd = qc->scsicmd;
2631 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2633 /* finish completing original command */
2634 __ata_qc_complete(qc);
/* fetch sense data so the midlayer sees why the command failed */
2636 atapi_request_sense(ap, dev, cmd);
2638 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2639 scsi_finish_command(cmd);
2645 /* hack alert! We cannot use the supplied completion
2646 * function from inside the ->eh_strategy_handler() thread.
2647 * libata is the only user of ->eh_strategy_handler() in
2648 * any kernel, so the default scsi_done() assumes it is
2649 * not being called from the SCSI EH.
2651 qc->scsidone = scsi_finish_command;
2653 switch (qc->tf.protocol) {
2656 case ATA_PROT_ATAPI_DMA:
2657 host_stat = ap->ops->bmdma_status(ap);
2659 /* before we do anything else, clear DMA-Start bit */
2660 ap->ops->bmdma_stop(ap);
2666 drv_stat = ata_chk_status(ap);
2668 /* ack bmdma irq events */
2669 ap->ops->irq_clear(ap);
2671 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2672 ap->id, qc->tf.command, drv_stat, host_stat);
2674 /* complete taskfile transaction */
2675 ata_qc_complete(qc, drv_stat);
2683 * ata_eng_timeout - Handle timeout of queued command
2684 * @ap: Port on which timed-out command is active
2686 * Some part of the kernel (currently, only the SCSI layer)
2687 * has noticed that the active command on port @ap has not
2688 * completed after a specified length of time. Handle this
2689 * condition by disabling DMA (if necessary) and completing
2690 * transactions, with error if necessary.
2692 * This also handles the case of the "lost interrupt", where
2693 * for some reason (possibly hardware bug, possibly driver bug)
2694 * an interrupt was not delivered to the driver, even though the
2695 * transaction completed successfully.
2698 * Inherited from SCSI layer (none, can sleep)
2701 void ata_eng_timeout(struct ata_port *ap)
2703 struct ata_queued_cmd *qc;
/* look up the active command; the call to ata_qc_timeout() on the
 * success path is elided from this view */
2707 qc = ata_qc_from_tag(ap, ap->active_tag)
2709 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2721 * ata_qc_new - Request an available ATA command, for queueing
2722 * @ap: Port associated with device @dev
2723 * @dev: Device from whom we request an available command structure
2728 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2730 struct ata_queued_cmd *qc = NULL;
/* atomically claim the first free tag in the port's qactive bitmap */
2733 for (i = 0; i < ATA_MAX_QUEUE; i++)
2734 if (!test_and_set_bit(i, &ap->qactive)) {
2735 qc = ata_qc_from_tag(ap, i);
2746 * ata_qc_new_init - Request an available ATA command, and initialize it
2747 * @ap: Port associated with device @dev
2748 * @dev: Device from whom we request an available command structure
2753 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2754 struct ata_device *dev)
2756 struct ata_queued_cmd *qc;
2758 qc = ata_qc_new(ap)
/* reset per-command cursors and byte accounting */
2765 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2767 qc->nbytes = qc->curbytes = 0;
2769 ata_tf_init(ap, &qc->tf, dev->devno);
/* propagate 48-bit addressing capability into the taskfile flags */
2771 if (dev->flags & ATA_DFLAG_LBA48)
2772 qc->tf.flags |= ATA_TFLAG_LBA48;
/* no-op completion callback used by synchronous (completion-based)
 * issuers; always indicates "complete normally" */
2778 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/*
 * __ata_qc_complete - low-level command teardown: poison the tag,
 * wake any synchronous waiter, and release the tag bit in qactive.
 */
2783 static void __ata_qc_complete(struct ata_queued_cmd *qc)
2785 struct ata_port *ap = qc->ap;
2786 unsigned int tag, do_clear = 0;
2790 if (likely(ata_tag_valid(tag))) {
2791 if (tag == ap->active_tag)
/* poison values catch use-after-complete bugs */
2792 ap->active_tag = ATA_TAG_POISON;
2793 qc->tag = ATA_TAG_POISON;
/* wake a synchronous issuer blocked in wait_for_completion() */
2798 struct completion *waiting = qc->waiting;
2803 if (likely(do_clear))
2804 clear_bit(tag, &ap->qactive);
2808 * ata_qc_free - free unused ata_queued_cmd
2809 * @qc: Command to complete
2811 * Designed to free unused ata_queued_cmd object
2812 * in case something prevents using it.
2817 void ata_qc_free(struct ata_queued_cmd *qc)
2819 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2820 assert(qc->waiting == NULL); /* nothing should be waiting */
/* releasing an unused command is just the low-level teardown */
2822 __ata_qc_complete(qc);
2826 * ata_qc_complete - Complete an active ATA command
2827 * @qc: Command to complete
2828 * @drv_stat: ATA status register contents
2834 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2838 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2839 assert(qc->flags & ATA_QCFLAG_ACTIVE);
/* tear down the DMA mapping before handing the buffers back */
2841 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2844 /* call completion callback */
2845 rc = qc->complete_fn(qc, drv_stat);
2846 qc->flags &= ~ATA_QCFLAG_ACTIVE;
2848 /* if callback indicates not to complete command (non-zero),
2849 * return immediately
2854 __ata_qc_complete(qc);
/*
 * ata_should_dma_map - decide whether @qc's buffers need DMA mapping.
 * DMA protocols always map; PIO-class protocols map only when the port
 * uses DMA for PIO (ATA_FLAG_PIO_DMA). Return statements are elided
 * from this view.
 */
2859 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
2861 struct ata_port *ap = qc->ap;
2863 switch (qc->tf.protocol) {
2865 case ATA_PROT_ATAPI_DMA:
2868 case ATA_PROT_ATAPI:
2870 case ATA_PROT_PIO_MULT:
2871 if (ap->flags & ATA_FLAG_PIO_DMA)
2884 * ata_qc_issue - issue taskfile to device
2885 * @qc: command to issue to device
2887 * Prepare an ATA command to submission to device.
2888 * This includes mapping the data into a DMA-able
2889 * area, filling in the S/G table, and finally
2890 * writing the taskfile to hardware, starting the command.
2893 * spin_lock_irqsave(host_set lock)
2896 * Zero on success, negative on error.
2899 int ata_qc_issue(struct ata_queued_cmd *qc)
2901 struct ata_port *ap = qc->ap;
2903 if (ata_should_dma_map(qc)) {
/* map multi-element S/G or single flat buffer as appropriate;
 * mapping failure paths (elided) bail out */
2904 if (qc->flags & ATA_QCFLAG_SG) {
2905 if (ata_sg_setup(qc))
2907 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2908 if (ata_sg_setup_one(qc))
/* no mapping needed: make sure the flag reflects that */
2912 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2915 ap->ops->qc_prep(qc);
2917 qc->ap->active_tag = qc->tag;
2918 qc->flags |= ATA_QCFLAG_ACTIVE;
/* hand off to the low-level driver's issue hook */
2920 return ap->ops->qc_issue(qc);
2927 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2928 * @qc: command to issue to device
2930 * Using various libata functions and hooks, this function
2931 * starts an ATA command. ATA commands are grouped into
2932 * classes called "protocols", and issuing each type of protocol
2933 * is slightly different.
2936 * spin_lock_irqsave(host_set lock)
2939 * Zero on success, negative on error.
2942 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
2944 struct ata_port *ap = qc->ap;
2946 ata_dev_select(ap, qc->dev->devno, 1, 0);
2948 switch (qc->tf.protocol) {
2949 case ATA_PROT_NODATA:
/* no data phase: just write the taskfile */
2950 ata_tf_to_host_nolock(ap, &qc->tf);
/* (ATA_PROT_DMA case label elided) load tf, then program + start bmdma */
2954 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2955 ap->ops->bmdma_setup(qc); /* set up bmdma */
2956 ap->ops->bmdma_start(qc); /* initiate bmdma */
2959 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
2960 ata_qc_set_polling(qc);
2961 ata_tf_to_host_nolock(ap, &qc->tf);
2962 ap->pio_task_state = PIO_ST;
2963 queue_work(ata_wq, &ap->pio_task);
2966 case ATA_PROT_ATAPI:
2967 ata_qc_set_polling(qc);
2968 ata_tf_to_host_nolock(ap, &qc->tf);
/* CDB delivery happens in the packet_task worker */
2969 queue_work(ata_wq, &ap->packet_task);
2972 case ATA_PROT_ATAPI_NODATA:
2973 ata_tf_to_host_nolock(ap, &qc->tf);
2974 queue_work(ata_wq, &ap->packet_task);
2977 case ATA_PROT_ATAPI_DMA:
2978 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2979 ap->ops->bmdma_setup(qc); /* set up bmdma */
2980 queue_work(ata_wq, &ap->packet_task);
2992 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2993 * @qc: Info associated with this ATA transaction.
2996 * spin_lock_irqsave(host_set lock)
/* MMIO flavor: point the controller at the PRD table, set the transfer
 * direction, and write the ATA command — without setting DMA-Start. */
2999 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3001 struct ata_port *ap = qc->ap;
3002 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3004 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3006 /* load PRD table addr. */
3007 mb(); /* make sure PRD table writes are visible to controller */
3008 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3010 /* specify data direction, triple-check start bit is clear */
3011 dmactl = readb(mmio + ATA_DMA_CMD);
3012 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* (elided 'if (!rw)' guard) set WR for device-to-memory transfers */
3014 dmactl |= ATA_DMA_WR;
3015 writeb(dmactl, mmio + ATA_DMA_CMD);
3017 /* issue r/w command */
3018 ap->ops->exec_command(ap, &qc->tf);
3022 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3023 * @qc: Info associated with this ATA transaction.
3026 * spin_lock_irqsave(host_set lock)
/* MMIO flavor: set the DMA-Start bit, kicking off the transfer that
 * ata_bmdma_setup_mmio() programmed. */
3029 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3031 struct ata_port *ap = qc->ap;
3032 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3035 /* start host DMA transaction */
3036 dmactl = readb(mmio + ATA_DMA_CMD);
3037 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3039 /* Strictly, one may wish to issue a readb() here, to
3040 * flush the mmio write. However, control also passes
3041 * to the hardware at this point, and it will interrupt
3042 * us when we are to resume control. So, in effect,
3043 * we don't care when the mmio write flushes.
3044 * Further, a read of the DMA status register _immediately_
3045 * following the write may not be what certain flaky hardware
3046 * is expected, so I think it is best to not add a readb()
3047 * without first all the MMIO ATA cards/mobos.
3048 * Or maybe I'm just being paranoid.
3053 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3054 * @qc: Info associated with this ATA transaction.
3057 * spin_lock_irqsave(host_set lock)
/*
 * Port-I/O flavor of BMDMA setup — identical in structure to the MMIO
 * variant above, but uses inb()/outb()/outl() on ioaddr.bmdma_addr.
 */
3060 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3062 struct ata_port *ap = qc->ap;
3063 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3066 /* load PRD table addr. */
3067 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3069 /* specify data direction, triple-check start bit is clear */
3070 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3071 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3073 dmactl |= ATA_DMA_WR;
3074 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3076 /* issue r/w command */
3077 ap->ops->exec_command(ap, &qc->tf);
3081 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3082 * @qc: Info associated with this ATA transaction.
3085 * spin_lock_irqsave(host_set lock)
/* Port-I/O flavor: set the Start bit in the BMDMA command register. */
3088 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3090 struct ata_port *ap = qc->ap;
3093 /* start host DMA transaction */
3094 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3095 outb(dmactl | ATA_DMA_START,
3096 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* Dispatch BMDMA start to the MMIO or port-I/O implementation. */
3099 void ata_bmdma_start(struct ata_queued_cmd *qc)
3101 if (qc->ap->flags & ATA_FLAG_MMIO)
3102 ata_bmdma_start_mmio(qc);
3104 ata_bmdma_start_pio(qc);
/* Dispatch BMDMA setup to the MMIO or port-I/O implementation. */
3107 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3109 if (qc->ap->flags & ATA_FLAG_MMIO)
3110 ata_bmdma_setup_mmio(qc);
3112 ata_bmdma_setup_pio(qc);
/*
 * Acknowledge BMDMA interrupt events by writing the status register's
 * current value back to itself (the interrupt bits are write-1-to-clear).
 */
3115 void ata_bmdma_irq_clear(struct ata_port *ap)
3117 if (ap->flags & ATA_FLAG_MMIO) {
3118 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3119 writeb(readb(mmio), mmio);
3121 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3122 outb(inb(addr), addr);
/* Read and return the BMDMA status register (MMIO or port I/O). */
3127 u8 ata_bmdma_status(struct ata_port *ap)
3130 if (ap->flags & ATA_FLAG_MMIO) {
3131 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3132 host_stat = readb(mmio + ATA_DMA_STATUS);
3134 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
/*
 * Halt an in-flight BMDMA transfer by clearing the Start bit, then
 * perform a dummy altstatus read to satisfy the spec-mandated one-PIO-
 * cycle wait before the HDMA1:0 state transition.
 */
3138 void ata_bmdma_stop(struct ata_port *ap)
3140 if (ap->flags & ATA_FLAG_MMIO) {
3141 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3143 /* clear start/stop bit */
3144 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3145 mmio + ATA_DMA_CMD);
3147 /* clear start/stop bit */
3148 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3149 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3152 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3153 ata_altstatus(ap); /* dummy read */
3157 * ata_host_intr - Handle host interrupt for given (port, task)
3158 * @ap: Port on which interrupt arrived (possibly...)
3159 * @qc: Taskfile currently active in engine
3161 * Handle host interrupt for given queued command. Currently,
3162 * only DMA interrupts are handled. All other commands are
3163 * handled via polling with interrupts disabled (nIEN bit).
3166 * spin_lock_irqsave(host_set lock)
3169 * One if interrupt was handled, zero if not (shared irq).
/* NOTE(review): elided excerpt — branch fallthroughs/breaks not visible. */
3172 inline unsigned int ata_host_intr (struct ata_port *ap,
3173 struct ata_queued_cmd *qc)
3175 u8 status, host_stat;
3177 switch (qc->tf.protocol) {
3180 case ATA_PROT_ATAPI_DMA:
3181 case ATA_PROT_ATAPI:
3182 /* check status of DMA engine */
3183 host_stat = ap->ops->bmdma_status(ap);
3184 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3186 /* if it's not our irq... */
3187 if (!(host_stat & ATA_DMA_INTR))
3190 /* before we do anything else, clear DMA-Start bit */
3191 ap->ops->bmdma_stop(ap);
3195 case ATA_PROT_ATAPI_NODATA:
3196 case ATA_PROT_NODATA:
3197 /* check altstatus */
3198 status = ata_altstatus(ap);
3199 if (status & ATA_BUSY)
3202 /* check main status, clearing INTRQ */
3203 status = ata_chk_status(ap);
3204 if (unlikely(status & ATA_BUSY))
3206 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3207 ap->id, qc->tf.protocol, status);
3209 /* ack bmdma irq events */
3210 ap->ops->irq_clear(ap);
3212 /* complete taskfile transaction */
3213 ata_qc_complete(qc, status);
3220 return 1; /* irq handled */
/* unexpected interrupt: count it and occasionally ack/log a trap */
3223 ap->stats.idle_irq++;
3226 if ((ap->stats.idle_irq % 1000) == 0) {
3228 ata_irq_ack(ap, 0); /* debug trap */
3229 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3232 return 0; /* irq not handled */
3236 * ata_interrupt - Default ATA host interrupt handler
3238 * @dev_instance: pointer to our host information structure
/*
 * Shared-IRQ handler: under the host_set lock, walk every enabled port,
 * and for each port with an active, interrupt-driven (nIEN clear) queued
 * command, let ata_host_intr() service it.  Returns IRQ_HANDLED if any
 * port claimed the interrupt.
 */
3247 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3249 struct ata_host_set *host_set = dev_instance;
3251 unsigned int handled = 0;
3252 unsigned long flags;
3254 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3255 spin_lock_irqsave(&host_set->lock, flags);
3257 for (i = 0; i < host_set->n_ports; i++) {
3258 struct ata_port *ap;
3260 ap = host_set->ports[i];
3261 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3262 struct ata_queued_cmd *qc;
3264 qc = ata_qc_from_tag(ap, ap->active_tag);
3265 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3266 (qc->flags & ATA_QCFLAG_ACTIVE))
3267 handled |= ata_host_intr(ap, qc);
3271 spin_unlock_irqrestore(&host_set->lock, flags);
3273 return IRQ_RETVAL(handled);
3277 * atapi_packet_task - Write CDB bytes to hardware
3278 * @_data: Port to which ATAPI device is attached.
3280 * When device has indicated its readiness to accept
3281 * a CDB, this function is called. Send the CDB.
3282 * If DMA is to be performed, exit immediately.
3283 * Otherwise, we are in polling mode, so poll
3284 * status until the operation succeeds or fails.
3287 * Kernel thread context (may sleep)
3290 static void atapi_packet_task(void *_data)
3292 struct ata_port *ap = _data;
3293 struct ata_queued_cmd *qc;
3296 qc = ata_qc_from_tag(ap, ap->active_tag);
3298 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3300 /* sleep-wait for BSY to clear */
3301 DPRINTK("busy wait\n");
/* on timeout, fall through to the error path below (elided here) */
3302 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3305 /* make sure DRQ is set */
3306 status = ata_chk_status(ap);
3307 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3311 DPRINTK("send cdb\n");
3312 assert(ap->cdb_len >= 12);
3313 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3315 /* if we are DMA'ing, irq handler takes over from here */
3316 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3317 ap->ops->bmdma_start(qc); /* initiate bmdma */
3319 /* non-data commands are also handled via irq */
3320 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3324 /* PIO commands are handled by polling */
3326 ap->pio_task_state = PIO_ST;
3327 queue_work(ata_wq, &ap->pio_task);
/* error path: complete the qc with an error status */
3333 ata_qc_complete(qc, ATA_ERR);
/*
 * Default port_start hook: allocate the coherent-DMA PRD (physical
 * region descriptor) table for this port.  Returns 0 on success;
 * error return on allocation failure is in elided lines.
 */
3336 int ata_port_start (struct ata_port *ap)
3338 struct device *dev = ap->host_set->dev;
3340 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3344 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
/* Default port_stop hook: free the PRD table allocated by ata_port_start. */
3349 void ata_port_stop (struct ata_port *ap)
3351 struct device *dev = ap->host_set->dev;
3353 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3357 * ata_host_remove - Unregister SCSI host structure with upper layers
3358 * @ap: Port to unregister
3359 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3364 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3366 struct Scsi_Host *sh = ap->host;
/* scsi_remove_host is conditional on do_unregister (guard elided here) */
3371 scsi_remove_host(sh);
3373 ap->ops->port_stop(ap);
3377 * ata_host_init - Initialize an ata_port structure
3378 * @ap: Structure to initialize
3379 * @host: associated SCSI mid-layer structure
3380 * @host_set: Collection of hosts to which @ap belongs
3381 * @ent: Probe information provided by low-level driver
3382 * @port_no: Port number associated with this ata_port
3388 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3389 struct ata_host_set *host_set,
3390 struct ata_probe_ent *ent, unsigned int port_no)
/* SCSI mid-layer setup: single channel, unique id, 12-byte max CDB */
3396 host->max_channel = 1;
3397 host->unique_id = ata_unique_id++;
3398 host->max_cmd_len = 12;
3399 scsi_set_device(host, ent->dev);
3400 scsi_assign_lock(host, &host_set->lock);
/* port starts disabled; probing enables it later */
3402 ap->flags = ATA_FLAG_PORT_DISABLED;
3403 ap->id = host->unique_id;
3405 ap->ctl = ATA_DEVCTL_OBS;
3406 ap->host_set = host_set;
3407 ap->port_no = port_no;
/* legacy-mode controllers use the fixed hardware port number */
3409 ent->legacy_mode ? ent->hard_port_no : port_no;
3410 ap->pio_mask = ent->pio_mask;
3411 ap->mwdma_mask = ent->mwdma_mask;
3412 ap->udma_mask = ent->udma_mask;
3413 ap->flags |= ent->host_flags;
3414 ap->ops = ent->port_ops;
3415 ap->cbl = ATA_CBL_NONE;
3416 ap->active_tag = ATA_TAG_POISON;
3417 ap->last_ctl = 0xFF;
3419 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3420 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3422 for (i = 0; i < ATA_MAX_DEVICES; i++)
3423 ap->device[i].devno = i;
/* start irq stats at 1 so the %1000 trap check never fires at zero */
3426 ap->stats.unhandled_irq = 1;
3427 ap->stats.idle_irq = 1;
3430 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3434 * ata_host_add - Attach low-level ATA driver to system
3435 * @ent: Information provided by low-level driver
3436 * @host_set: Collections of ports to which we add
3437 * @port_no: Port number associated with this host
/*
 * Allocate a Scsi_Host with an embedded ata_port in hostdata, initialize
 * it, and invoke the driver's port_start hook.  On port_start failure the
 * host is released (error path partially elided).
 */
3445 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3446 struct ata_host_set *host_set,
3447 unsigned int port_no)
3449 struct Scsi_Host *host;
3450 struct ata_port *ap;
3454 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3458 ap = (struct ata_port *) &host->hostdata[0];
3460 ata_host_init(ap, host, host_set, ent, port_no);
3462 rc = ap->ops->port_start(ap);
3469 scsi_host_put(host);
/*
 * ata_device_add - register all ports described by @ent with libata and
 * the SCSI layer.  Allocates the host_set container, adds each port,
 * requests the (shared) IRQ, probes each bus synchronously, then scans
 * the SCSI hosts.  Returns the number of ports on success, 0 on failure.
 * NOTE(review): elided excerpt — several error-handling branches between
 * the visible lines are not shown.
 */
3483 int ata_device_add(struct ata_probe_ent *ent)
3485 unsigned int count = 0, i;
3486 struct device *dev = ent->dev;
3487 struct ata_host_set *host_set;
3490 /* alloc a container for our list of ATA ports (buses) */
3491 host_set = kmalloc(sizeof(struct ata_host_set) +
3492 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3495 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3496 spin_lock_init(&host_set->lock);
3498 host_set->dev = dev;
3499 host_set->n_ports = ent->n_ports;
3500 host_set->irq = ent->irq;
3501 host_set->mmio_base = ent->mmio_base;
3502 host_set->private_data = ent->private_data;
3503 host_set->ops = ent->port_ops;
3505 /* register each port bound to this device */
3506 for (i = 0; i < ent->n_ports; i++) {
3507 struct ata_port *ap;
3508 unsigned long xfer_mode_mask;
3510 ap = ata_host_add(ent, host_set, i);
3514 host_set->ports[i] = ap;
/* combined transfer-mode bitmap, used only for the dmesg banner */
3515 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3516 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3517 (ap->pio_mask << ATA_SHIFT_PIO);
3519 /* print per-port info to dmesg */
3520 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3521 "bmdma 0x%lX irq %lu\n",
3523 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3524 ata_mode_string(xfer_mode_mask),
3525 ap->ioaddr.cmd_addr,
3526 ap->ioaddr.ctl_addr,
3527 ap->ioaddr.bmdma_addr,
/* drop any latched BMDMA irq state before enabling interrupts */
3531 host_set->ops->irq_clear(ap);
3540 /* obtain irq, that is shared between channels */
3541 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3542 DRV_NAME, host_set))
3545 /* perform each probe synchronously */
3546 DPRINTK("probe begin\n");
3547 for (i = 0; i < count; i++) {
3548 struct ata_port *ap;
3551 ap = host_set->ports[i];
3553 DPRINTK("ata%u: probe begin\n", ap->id);
3554 rc = ata_bus_probe(ap);
3555 DPRINTK("ata%u: probe end\n", ap->id);
3558 /* FIXME: do something useful here?
3559 * Current libata behavior will
3560 * tear down everything when
3561 * the module is removed
3562 * or the h/w is unplugged.
3566 rc = scsi_add_host(ap->host, dev);
3568 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3570 /* FIXME: do something useful here */
3571 /* FIXME: handle unconditional calls to
3572 * scsi_scan_host and ata_host_remove, below,
3578 /* probes are done, now scan each port's disk(s) */
3579 DPRINTK("probe begin\n")
3580 for (i = 0; i < count; i++) {
3581 struct ata_port *ap = host_set->ports[i];
3583 scsi_scan_host(ap->host);
3586 dev_set_drvdata(dev, host_set);
3588 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3589 return ent->n_ports; /* success */
/* error path: tear down every port added so far */
3592 for (i = 0; i < count; i++) {
3593 ata_host_remove(host_set->ports[i], 1);
3594 scsi_host_put(host_set->ports[i]->host);
3597 VPRINTK("EXIT, returning 0\n");
3602 * ata_scsi_release - SCSI layer callback hook for host unload
3603 * @host: libata host to be unloaded
3605 * Performs all duties necessary to shut down a libata port...
3606 * Kill port kthread, disable port, and release resources.
3609 * Inherited from SCSI layer.
3615 int ata_scsi_release(struct Scsi_Host *host)
3617 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3621 ap->ops->port_disable(ap);
/* do_unregister == 0: SCSI layer is already unregistering us */
3622 ata_host_remove(ap, 0);
3629 * ata_std_ports - initialize ioaddr with standard port offsets.
3630 * @ioaddr: IO address structure to be initialized
/* Derive each taskfile register address from cmd_addr + its ATA offset. */
3632 void ata_std_ports(struct ata_ioports *ioaddr)
3634 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3635 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3636 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3637 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3638 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3639 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3640 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3641 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3642 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3643 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/*
 * Allocate and zero an ata_probe_ent, then copy the per-port template
 * (sht, flags, transfer masks, ops) from @port into it.  Returns NULL
 * on allocation failure (elided branch logs and bails out).
 */
3646 static struct ata_probe_ent *
3647 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
3649 struct ata_probe_ent *probe_ent;
3651 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
3653 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3654 kobject_name(&(dev->kobj)));
3658 memset(probe_ent, 0, sizeof(*probe_ent));
3660 INIT_LIST_HEAD(&probe_ent->node);
3661 probe_ent->dev = dev;
3663 probe_ent->sht = port->sht;
3664 probe_ent->host_flags = port->host_flags;
3665 probe_ent->pio_mask = port->pio_mask;
3666 probe_ent->mwdma_mask = port->mwdma_mask;
3667 probe_ent->udma_mask = port->udma_mask;
3668 probe_ent->port_ops = port->port_ops;
/*
 * Build a probe_ent for a PCI IDE controller in native mode: two ports,
 * shared IRQ, addresses taken from PCI BARs 0-4 (BAR4 holds the shared
 * BMDMA register block; port 1 uses its upper half at +8).
 */
3674 struct ata_probe_ent *
3675 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3677 struct ata_probe_ent *probe_ent =
3678 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3682 probe_ent->n_ports = 2;
3683 probe_ent->irq = pdev->irq;
3684 probe_ent->irq_flags = SA_SHIRQ;
3686 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3687 probe_ent->port[0].altstatus_addr =
3688 probe_ent->port[0].ctl_addr =
3689 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3690 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3692 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3693 probe_ent->port[1].altstatus_addr =
3694 probe_ent->port[1].ctl_addr =
3695 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3696 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3698 ata_std_ports(&probe_ent->port[0]);
3699 ata_std_ports(&probe_ent->port[1]);
/*
 * Build two single-port probe_ents for a controller in legacy mode, with
 * the historical fixed resources: primary at 0x1f0/0x3f6 IRQ 14, and
 * secondary (returned via @ppe2) at 0x170/0x376 IRQ 15.  Both share the
 * BMDMA block from PCI BAR4.
 */
3704 static struct ata_probe_ent *
3705 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
3706 struct ata_probe_ent **ppe2)
3708 struct ata_probe_ent *probe_ent, *probe_ent2;
3710 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3713 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
3719 probe_ent->n_ports = 1;
3720 probe_ent->irq = 14;
3722 probe_ent->hard_port_no = 0;
3723 probe_ent->legacy_mode = 1;
3725 probe_ent2->n_ports = 1;
3726 probe_ent2->irq = 15;
3728 probe_ent2->hard_port_no = 1;
3729 probe_ent2->legacy_mode = 1;
3731 probe_ent->port[0].cmd_addr = 0x1f0;
3732 probe_ent->port[0].altstatus_addr =
3733 probe_ent->port[0].ctl_addr = 0x3f6;
3734 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3736 probe_ent2->port[0].cmd_addr = 0x170;
3737 probe_ent2->port[0].altstatus_addr =
3738 probe_ent2->port[0].ctl_addr = 0x376;
3739 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3741 ata_std_ports(&probe_ent->port[0]);
3742 ata_std_ports(&probe_ent2->port[0]);
3749 * ata_pci_init_one - Initialize/register PCI IDE host controller
3750 * @pdev: Controller to be initialized
3751 * @port_info: Information from low-level host driver
3752 * @n_ports: Number of ports attached to host controller
3755 * Inherited from PCI layer (may sleep).
/*
 * Top-level PCI attach helper: detect legacy vs native mode from the
 * programming-interface byte, enable the device, claim regions (including
 * the fixed legacy 0x1f0/0x170 ranges, tolerating a prior libata claim),
 * set DMA masks, build probe entries, and register them.
 * NOTE(review): elided excerpt — several guards and gotos between the
 * visible lines are not shown.
 */
3761 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3762 unsigned int n_ports)
3764 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3765 struct ata_port_info *port[2];
3767 unsigned int legacy_mode = 0;
3768 int disable_dev_on_err = 1;
3773 port[0] = port_info[0];
3775 port[1] = port_info[1];
/* legacy detection: prog-if bits 0 and 2 indicate native mode per channel */
3779 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
3780 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
3781 /* TODO: support transitioning to native mode? */
3782 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3783 mask = (1 << 2) | (1 << 0);
3784 if ((tmp8 & mask) != mask)
3785 legacy_mode = (1 << 3);
3789 if ((!legacy_mode) && (n_ports > 1)) {
3790 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3794 rc = pci_enable_device(pdev);
3798 rc = pci_request_regions(pdev, DRV_NAME);
/* regions already held by someone else: don't disable the device on exit */
3800 disable_dev_on_err = 0;
3805 if (!request_region(0x1f0, 8, "libata")) {
3806 struct resource *conflict, res;
3808 res.end = 0x1f0 + 8 - 1;
3809 conflict = ____request_resource(&ioport_resource, &res);
/* only proceed if the conflicting claim is our own earlier "libata" one */
3810 if (!strcmp(conflict->name, "libata"))
3811 legacy_mode |= (1 << 0);
3813 disable_dev_on_err = 0;
3814 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3817 legacy_mode |= (1 << 0);
3819 if (!request_region(0x170, 8, "libata")) {
3820 struct resource *conflict, res;
3822 res.end = 0x170 + 8 - 1;
3823 conflict = ____request_resource(&ioport_resource, &res);
3824 if (!strcmp(conflict->name, "libata"))
3825 legacy_mode |= (1 << 1);
3827 disable_dev_on_err = 0;
3828 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3831 legacy_mode |= (1 << 1);
3834 /* we have legacy mode, but all ports are unavailable */
3835 if (legacy_mode == (1 << 3)) {
3837 goto err_out_regions;
3840 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3842 goto err_out_regions;
3843 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3845 goto err_out_regions;
3848 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
3850 probe_ent = ata_pci_init_native_mode(pdev, port);
3853 goto err_out_regions;
3856 pci_set_master(pdev);
3858 /* FIXME: check ata_device_add return */
3860 if (legacy_mode & (1 << 0))
3861 ata_device_add(probe_ent);
3862 if (legacy_mode & (1 << 1))
3863 ata_device_add(probe_ent2);
3865 ata_device_add(probe_ent);
/* error unwinding: release only the legacy regions we actually claimed */
3873 if (legacy_mode & (1 << 0))
3874 release_region(0x1f0, 8);
3875 if (legacy_mode & (1 << 1))
3876 release_region(0x170, 8);
3877 pci_release_regions(pdev);
3879 if (disable_dev_on_err)
3880 pci_disable_device(pdev);
3885 * ata_pci_remove_one - PCI layer callback for device removal
3886 * @pdev: PCI device that was removed
3888 * PCI layer indicates to libata via this hook that
3889 * hot-unplug or module unload event has occurred.
3890 * Handle this by unregistering all objects associated
3891 * with this PCI device. Free those objects. Then finally
3892 * release PCI resources and disable device.
3895 * Inherited from PCI layer (may sleep).
3898 void ata_pci_remove_one (struct pci_dev *pdev)
3900 struct device *dev = pci_dev_to_dev(pdev);
3901 struct ata_host_set *host_set = dev_get_drvdata(dev);
3902 struct ata_port *ap;
/* pass 1: detach every port's Scsi_Host from the SCSI mid-layer */
3905 for (i = 0; i < host_set->n_ports; i++) {
3906 ap = host_set->ports[i];
3908 scsi_remove_host(ap->host);
3911 free_irq(host_set->irq, host_set);
3912 if (host_set->ops->host_stop)
3913 host_set->ops->host_stop(host_set);
3914 if (host_set->mmio_base)
3915 iounmap(host_set->mmio_base);
/* pass 2: release per-port resources, legacy I/O regions, host refs */
3917 for (i = 0; i < host_set->n_ports; i++) {
3918 ap = host_set->ports[i];
3920 ata_scsi_release(ap->host);
3922 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3923 struct ata_ioports *ioaddr = &ap->ioaddr;
3925 if (ioaddr->cmd_addr == 0x1f0)
3926 release_region(0x1f0, 8);
3927 else if (ioaddr->cmd_addr == 0x170)
3928 release_region(0x170, 8);
3931 scsi_host_put(ap->host);
3936 pci_release_regions(pdev);
3937 pci_disable_device(pdev);
3938 dev_set_drvdata(dev, NULL);
3941 /* move to PCI subsystem */
/*
 * Read a config-space register of the width given in @bits and compare
 * (presumably after masking, in elided lines) against @bits->val.
 * Returns 1 on match, 0 otherwise.
 */
3942 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3944 unsigned long tmp = 0;
3946 switch (bits->width) {
3949 pci_read_config_byte(pdev, bits->reg, &tmp8);
3955 pci_read_config_word(pdev, bits->reg, &tmp16);
3961 pci_read_config_dword(pdev, bits->reg, &tmp32);
3972 return (tmp == bits->val) ? 1 : 0;
3974 #endif /* CONFIG_PCI */
/* Module init: create the "ata" workqueue used by pio_task/packet_task. */
3986 static int __init ata_init(void)
3988 ata_wq = create_workqueue("ata");
3992 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
/* Module exit: tear down the workqueue created in ata_init(). */
3996 static void __exit ata_exit(void)
3998 destroy_workqueue(ata_wq)
4001 module_init(ata_init);
4002 module_exit(ata_exit);
4005 * libata is essentially a library of internal helper functions for
4006 * low-level ATA host controller drivers. As such, the API/ABI is
4007 * likely to change as new drivers are added and updated.
4008 * Do not depend on ABI/API stability.
/* Symbols exported (GPL-only) for use by low-level ATA host drivers. */
4011 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4012 EXPORT_SYMBOL_GPL(ata_std_ports);
4013 EXPORT_SYMBOL_GPL(ata_device_add);
4014 EXPORT_SYMBOL_GPL(ata_sg_init);
4015 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4016 EXPORT_SYMBOL_GPL(ata_qc_complete);
4017 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4018 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4019 EXPORT_SYMBOL_GPL(ata_tf_load);
4020 EXPORT_SYMBOL_GPL(ata_tf_read);
4021 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4022 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4023 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4024 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4025 EXPORT_SYMBOL_GPL(ata_check_status);
4026 EXPORT_SYMBOL_GPL(ata_altstatus);
4027 EXPORT_SYMBOL_GPL(ata_chk_err);
4028 EXPORT_SYMBOL_GPL(ata_exec_command);
4029 EXPORT_SYMBOL_GPL(ata_port_start);
4030 EXPORT_SYMBOL_GPL(ata_port_stop);
4031 EXPORT_SYMBOL_GPL(ata_interrupt);
4032 EXPORT_SYMBOL_GPL(ata_qc_prep);
4033 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4034 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4035 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4036 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4037 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4038 EXPORT_SYMBOL_GPL(ata_port_probe);
4039 EXPORT_SYMBOL_GPL(sata_phy_reset);
4040 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4041 EXPORT_SYMBOL_GPL(ata_bus_reset);
4042 EXPORT_SYMBOL_GPL(ata_port_disable);
4043 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4044 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4045 EXPORT_SYMBOL_GPL(ata_scsi_error);
4046 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4047 EXPORT_SYMBOL_GPL(ata_scsi_release);
4048 EXPORT_SYMBOL_GPL(ata_host_intr);
4049 EXPORT_SYMBOL_GPL(ata_dev_classify);
4050 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4051 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
/* PCI-specific helpers, exported only when CONFIG_PCI is set */
4054 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4055 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4056 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4057 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4058 #endif /* CONFIG_PCI */