/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
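	/* For illustration: with MV_MAX_Q_DEPTH = 32, the request queue is
	 * 32 * 32B = 1KB and the response queue is 32 * 8B = 256B, which is
	 * exactly what the alignment comment above requires.  Each per-tag
	 * SG table holds MV_MAX_SG_CT 16B ePRDs (4KB).
	 */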
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
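	/* Worked example of the mapping above: port 6 is HC 6 >> 2 = 1,
	 * hard port 6 & 3 = 2; ports 0-3 live on HC0, ports 4-7 on HC1.
	 */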
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
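/* Note on the masks above: the CRQB queue is 1KB-aligned, so the low 10 bits
 * of crqb_dma are zero and can be reused for the producer index (shifted by
 * EDMA_REQ_Q_PTR_SHIFT); likewise the 256B-aligned CRPB queue leaves the low
 * 8 bits free.  See mv_set_edma_ptrs() below.
 */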
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
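/* writelfl() is used wherever a register write must have taken effect on the
 * chip before the driver proceeds, e.g. the EDMA_EN write in mv_start_dma()
 * that is immediately checked with a WARN_ON readback; the dummy readl()
 * forces the posted PCI write to complete.
 */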
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
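/* Address arithmetic example: for port 5, mv_hc_base_from_port() yields
 * base + 0x20000 + (1 * 0x10000) = base + 0x30000; adding the 0x2000 arbiter
 * window and hard port 1 * 0x2000 puts the port registers at base + 0x34000.
 */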
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
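/* Example of the pointer encoding above: with req_idx == 3, index is
 * (3 & 0x1f) << 5 == 0x60, so the register carries the 1KB-aligned queue
 * base in bits 31:10 and the producer index in bits 9:5.
 */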
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being started
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
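/* Splitting example for the loop above: an 8KB buffer at DMA address 0x1f000
 * has offset 0xf000, so the first ePRD covers 0x1000 bytes (up to the 64KB
 * boundary at 0x20000) and a second ePRD covers the remaining 0x1000.  A
 * full 0x10000 chunk is stored as (len & 0xffff) == 0, which the EDMA
 * apparently interprets as 64KB -- hence the 0xffff DMA boundary above.
 */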
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
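/* The packed CRQB command word: data in bits 7:0, the ATA register address
 * in bits 10:8 (CRQB_CMD_ADDR_SHIFT), the 0x2 "CS" select in bits 12:11,
 * and CRQB_CMD_LAST in bit 15 to terminate the register list.
 */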
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
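/* Queue handshake: software only ever advances req_idx and rewrites the
 * request in-pointer above; the EDMA fetches CRQBs until its own consumer
 * pointer catches up, and completions then appear in the response queue
 * (see mv_intr_edma() below).
 */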
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, if any
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
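/* Consumption example for the loop above: if the h/w in-pointer reads 5 while
 * resp_idx is 3, entries 3 and 4 are completed, resp_idx becomes 5, and the
 * out-pointer register is written once at the end with out_index == 5.
 */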
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
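/* Layout note: on 50xx the per-port PHY registers live inside the HC block
 * at 0x100 per hard port (0x100 for hard port 0), e.g. port 5 maps to
 * hc_mmio(HC1) + 0x200.
 */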
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

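/*
 * A note on the SATA_INTERFACE_CTL update above: "(ifctl & 0xfff) |
 * 0x9b1000" keeps only the low 12 bits of the current register value
 * and forces the upper bits to the constant the chip spec calls for;
 * bit 7, set just before, falls inside the preserved low bits and
 * enables gen2i speed.  This restates what the code does rather than
 * an interpretation of the datasheet itself.
 */
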
/**
 *	mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to be safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	} while (1);

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}

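/*
 * Background on the SControl writes in mv_phy_reset(), for readers
 * without the SATA spec at hand: the low nibble of SControl is the DET
 * field.  Writing 0x301 (DET = 1) requests interface re-initialization,
 * i.e. COMRESET; writing 0x300 (DET = 0) releases it.  The poll then
 * waits for the SStatus DET field to read back 3 (device present, PHY
 * communication established) or 0 (no device detected).
 */
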
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);

	return rc;
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	unsigned int shift;
	u32 tmp, mask;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}

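/*
 * Worked example of the mask arithmetic above: for port 5 (on the
 * second host controller), shift = 5 * 2 + 1 = 11, so mask =
 * 0x3 << 11 = 0x1800, i.e. the err/done bit pair for that port in the
 * main mask register.
 */
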
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}

/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

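/*
 * The shadow register block exposes the classic taskfile registers at
 * 32-bit strides, so the address math above is just "base + 4 * index".
 * For example, with ATA_REG_NSECT == 2, the sector-count register lives
 * at shd_base + 8.
 */
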
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
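		/*
		 * Worked example of the metadata location formula quoted
		 * above, using a made-up capacity: a drive reporting
		 * n_sectors == 0x174706db would keep its RAID metadata at
		 * sector 0x174706db & ~0xfffff == 0x17400000.
		 */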
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
			HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
			HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
			HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

/**
 *	mv_platform_probe - handle a positive probe of an SoC Marvell host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	hpriv->base = ioremap(res->start, res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;
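	/*
	 * The platform resource is expected to point at the SATAHC0
	 * register block rather than the start of the chip's register
	 * file, so the mapping is biased down by MV_SATAHC0_REG_BASE;
	 * that way the shared code can use the same absolute offsets
	 * as it does for the PCI BAR mapping.
	 */
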
	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 *	mv_platform_remove - unplug a platform interface
 *	@pdev: platform device
 *
 *	A platform bus SATA device has been unplugged. Perform the needed
 *	cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *base = hpriv->base;

	ata_host_detach(host);
	iounmap(base);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

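/*
 * The ladder above: prefer a 64-bit streaming mask with a 64-bit (or,
 * failing that, 32-bit) coherent mask; if the device cannot do 64-bit
 * DMA at all, fall back to 32-bit for both.  pci_set_dma_mask() returns
 * 0 on success, which is why the first test is negated.
 */
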
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

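/*
 * dmam_pool_create() is the device-managed (devres) variant of
 * dma_pool_create(), so the early -ENOMEM returns above need no explicit
 * unwinding: any pools already created are torn down automatically when
 * the probe fails or the device goes away.
 */
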
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);
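	/*
	 * pci_enable_msi() returns non-zero on failure, so the branch
	 * above re-enables legacy INTx assertion only when MSI was
	 * requested via the module parameter but could not be set up.
	 */
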
	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);