/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a fashion
 *  similar to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */

        /* for PCI config register 20 */
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
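
        /* Sizing, worked out from the constants above: each command tag
           gets a 128-byte CPB plus (1024 - 128) / 16 = 56 APRDs in the
           external table, i.e. 56 + 5 in-CPB entries = 61 SG segments
           per command; the per-port DMA area is therefore
           32 * (128 + 56 * 16) = 32768 bytes. */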

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments, the remainder is stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3  */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};
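
/* For example, a seven-segment request fills aprd[0..4] inside the CPB
   and spills the remaining two entries into the external APRD table
   pointed to by next_aprd (see nv_adma_fill_sg() below). */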

struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};

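/* The ADMA general control register carries a per-port interrupt flag at
   bit position 19 + 12 * port, i.e. bit 19 for port 0 and bit 31 for
   port 1; the macro below tests it. */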
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;

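/* The two helpers below switch a port between legacy register mode and
   ADMA mode. Each transition polls NV_ADMA_STAT up to 20 times with
   ndelay(50) between reads, i.e. a budget of roughly a microsecond
   before the timeout warning fires. */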
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                        status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
               !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Since commands where a result TF is requested are not
           executed in ADMA mode, the only time this function will be called
           in ADMA mode will be if a command fails. In this case we
           don't care about going into register mode with ADMA commands
           pending, as the commands will all shortly be aborted anyway. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}
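
/* Illustrative note: each CPB taskfile entry produced above is
   (shadow register << 8) | value; the first entry carries WNB
   (wait-not-BSY), the command entry carries CMDEND, and unused slots
   of the 12-entry array are padded with IGN. */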

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
                                cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /* check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

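        /* Program the CPB base address. The (x >> 16) >> 16 below is
           split into two shifts so it remains well defined when
           dma_addr_t is only 32 bits wide (a plain >> 32 would not be). */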
        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;
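
        /* Coherent buffer layout: 32 x 128-byte CPBs first, then
           32 x 896-byte APRD tables (one per tag), 32 KiB in total. */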
1180
1181         ap->private_data = pp;
1182
1183         /* clear any outstanding interrupt conditions */
1184         writew(0xffff, mmio + NV_ADMA_STAT);
1185
1186         /* initialize port variables */
1187         pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1188
1189         /* clear CPB fetch count */
1190         writew(0, mmio + NV_ADMA_CPB_COUNT);
1191
1192         /* clear GO for register mode, enable interrupt */
1193         tmp = readw(mmio + NV_ADMA_CTL);
1194         writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1195                  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1196
1197         tmp = readw(mmio + NV_ADMA_CTL);
1198         writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1199         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1200         udelay(1);
1201         writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1202         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1203
1204         return 0;
1205 }
1206
1207 static void nv_adma_port_stop(struct ata_port *ap)
1208 {
1209         struct nv_adma_port_priv *pp = ap->private_data;
1210         void __iomem *mmio = pp->ctl_block;
1211
1212         VPRINTK("ENTER\n");
1213         writew(0, mmio + NV_ADMA_CTL);
1214 }
1215
1216 #ifdef CONFIG_PM
1217 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1218 {
1219         struct nv_adma_port_priv *pp = ap->private_data;
1220         void __iomem *mmio = pp->ctl_block;
1221
1222         /* Go to register mode - clears GO */
1223         nv_adma_register_mode(ap);
1224
1225         /* clear CPB fetch count */
1226         writew(0, mmio + NV_ADMA_CPB_COUNT);
1227
1228         /* disable interrupt, shut down port */
1229         writew(0, mmio + NV_ADMA_CTL);
1230
1231         return 0;
1232 }
1233
1234 static int nv_adma_port_resume(struct ata_port *ap)
1235 {
1236         struct nv_adma_port_priv *pp = ap->private_data;
1237         void __iomem *mmio = pp->ctl_block;
1238         u16 tmp;
1239
1240         /* set CPB block location */
1241         writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
1242         writel((pp->cpb_dma >> 16 ) >> 16,      mmio + NV_ADMA_CPB_BASE_HIGH);
1243
1244         /* clear any outstanding interrupt conditions */
1245         writew(0xffff, mmio + NV_ADMA_STAT);
1246
1247         /* initialize port variables */
1248         pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1249
1250         /* clear CPB fetch count */
1251         writew(0, mmio + NV_ADMA_CPB_COUNT);
1252
1253         /* clear GO for register mode, enable interrupt */
1254         tmp = readw(mmio + NV_ADMA_CTL);
1255         writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1256                  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1257
1258         tmp = readw(mmio + NV_ADMA_CTL);
1259         writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1260         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1261         udelay(1);
1262         writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1263         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1264
1265         return 0;
1266 }
1267 #endif
1268
1269 static void nv_adma_setup_port(struct ata_port *ap)
1270 {
1271         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1272         struct ata_ioports *ioport = &ap->ioaddr;
1273
1274         VPRINTK("ENTER\n");
1275
1276         mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1277
1278         ioport->cmd_addr        = mmio;
1279         ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1280         ioport->error_addr      =
1281         ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1282         ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1283         ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1284         ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1285         ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1286         ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1287         ioport->status_addr     =
1288         ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1289         ioport->altstatus_addr  =
1290         ioport->ctl_addr        = mmio + 0x20;
1291 }
1292
1293 static int nv_adma_host_init(struct ata_host *host)
1294 {
1295         struct pci_dev *pdev = to_pci_dev(host->dev);
1296         unsigned int i;
1297         u32 tmp32;
1298
1299         VPRINTK("ENTER\n");
1300
1301         /* enable ADMA on the ports */
1302         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1303         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1304                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1305                  NV_MCP_SATA_CFG_20_PORT1_EN |
1306                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1307
1308         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1309
1310         for (i = 0; i < host->n_ports; i++)
1311                 nv_adma_setup_port(host->ports[i]);
1312
1313         return 0;
1314 }
1315
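/* Build one ADMA PRD entry.  The CPB embeds five APRDs; index 4 is the
 * last in-CPB slot and never gets CONT, since any further entries live in
 * the external APRD table pointed to by next_aprd.
 */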
1316 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1317                               struct scatterlist *sg,
1318                               int idx,
1319                               struct nv_adma_prd *aprd)
1320 {
1321         u8 flags = 0;
1322         if (qc->tf.flags & ATA_TFLAG_WRITE)
1323                 flags |= NV_APRD_WRITE;
1324         if (idx == qc->n_elem - 1)
1325                 flags |= NV_APRD_END;
1326         else if (idx != 4)
1327                 flags |= NV_APRD_CONT;
1328
1329         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1330         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1331         aprd->flags = flags;
1332         aprd->packet_len = 0;
1333 }
1334
1335 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1336 {
1337         struct nv_adma_port_priv *pp = qc->ap->private_data;
1338         unsigned int idx;
1339         struct nv_adma_prd *aprd;
1340         struct scatterlist *sg;
1341
1342         VPRINTK("ENTER\n");
1343
1344         idx = 0;
1345
1346         ata_for_each_sg(sg, qc) {
1347                 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1348                 nv_adma_fill_aprd(qc, sg, idx, aprd);
1349                 idx++;
1350         }
1351         if (idx > 5)
1352                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1353         else
1354                 cpb->next_aprd = cpu_to_le64(0);
1355 }
1356
1357 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1358 {
1359         struct nv_adma_port_priv *pp = qc->ap->private_data;
1360
1361         /* ADMA engine can only be used for non-ATAPI DMA commands,
1362            or interrupt-driven no-data commands, where a result taskfile
1363            is not required. */
1364         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1365             (qc->tf.flags & ATA_TFLAG_POLLING) ||
1366             (qc->flags & ATA_QCFLAG_RESULT_TF))
1367                 return 1;
1368
1369         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1370             (qc->tf.protocol == ATA_PROT_NODATA))
1371                 return 0;
1372
1373         return 1;
1374 }
1375
1376 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1377 {
1378         struct nv_adma_port_priv *pp = qc->ap->private_data;
1379         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1380         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1381                        NV_CPB_CTL_IEN;
1382
1383         if (nv_adma_use_reg_mode(qc)) {
1384                 nv_adma_register_mode(qc->ap);
1385                 ata_qc_prep(qc);
1386                 return;
1387         }
1388
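        /* Retire the old CPB in a safe order: flag the response as done and
         * clear ctl_flags, with barriers in between, so the controller never
         * works on a CPB that is being rewritten.
         */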
1389         cpb->resp_flags = NV_CPB_RESP_DONE;
1390         wmb();
1391         cpb->ctl_flags = 0;
1392         wmb();
1393
1394         cpb->len                = 3;
1395         cpb->tag                = qc->tag;
1396         cpb->next_cpb_idx       = 0;
1397
1398         /* turn on NCQ flags for NCQ commands */
1399         if (qc->tf.protocol == ATA_PROT_NCQ)
1400                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1401
1402         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1403
1404         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1405
1406         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1407                 nv_adma_fill_sg(qc, cpb);
1408                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1409         } else
1410                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1411
1412         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1413            finished filling in all of the contents */
1414         wmb();
1415         cpb->ctl_flags = ctl_flags;
1416         wmb();
1417         cpb->resp_flags = 0;
1418 }
1419
1420 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1421 {
1422         struct nv_adma_port_priv *pp = qc->ap->private_data;
1423         void __iomem *mmio = pp->ctl_block;
1424         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1425
1426         VPRINTK("ENTER\n");
1427
1428         if (nv_adma_use_reg_mode(qc)) {
1429                 /* use ATA register mode */
1430                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1431                 nv_adma_register_mode(qc->ap);
1432                 return ata_qc_issue_prot(qc);
1433         } else
1434                 nv_adma_mode(qc->ap);
1435
1436         /* write append register, command tag in lower 8 bits
1437            and (number of cpbs to append -1) in top 8 bits */
1438         wmb();
1439
1440         if (curr_ncq != pp->last_issue_ncq) {
1441                 /* Seems to need some delay before switching between NCQ and non-NCQ
1442                    commands, else we get command timeouts and such. */
1443                 udelay(20);
1444                 pp->last_issue_ncq = curr_ncq;
1445         }
1446
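        /* the count field (high byte) stays zero here, so writing just the
         * tag appends exactly one CPB */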
1447         writew(qc->tag, mmio + NV_ADMA_APPEND);
1448
1449         DPRINTK("Issued tag %u\n", qc->tag);
1450
1451         return 0;
1452 }
1453
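/* The generic flavour has no chip-level interrupt status register to
 * consult, so the handler simply walks the ports and services whichever
 * one has an active, interrupt-driven command.
 */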
1454 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1455 {
1456         struct ata_host *host = dev_instance;
1457         unsigned int i;
1458         unsigned int handled = 0;
1459         unsigned long flags;
1460
1461         spin_lock_irqsave(&host->lock, flags);
1462
1463         for (i = 0; i < host->n_ports; i++) {
1464                 struct ata_port *ap;
1465
1466                 ap = host->ports[i];
1467                 if (ap &&
1468                     !(ap->flags & ATA_FLAG_DISABLED)) {
1469                         struct ata_queued_cmd *qc;
1470
1471                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1472                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1473                                 handled += ata_host_intr(ap, qc);
1474                         else
1475                                 // No request pending?  Clear interrupt status
1476                                 // anyway, in case a stale interrupt is pending.
1477                                 ap->ops->check_status(ap);
1478                 }
1479
1480         }
1481
1482         spin_unlock_irqrestore(&host->lock, flags);
1483
1484         return IRQ_RETVAL(handled);
1485 }
1486
1487 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1488 {
1489         int i, handled = 0;
1490
1491         for (i = 0; i < host->n_ports; i++) {
1492                 struct ata_port *ap = host->ports[i];
1493
1494                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1495                         handled += nv_host_intr(ap, irq_stat);
1496
1497                 irq_stat >>= NV_INT_PORT_SHIFT;
1498         }
1499
1500         return IRQ_RETVAL(handled);
1501 }
1502
1503 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1504 {
1505         struct ata_host *host = dev_instance;
1506         u8 irq_stat;
1507         irqreturn_t ret;
1508
1509         spin_lock(&host->lock);
1510         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1511         ret = nv_do_interrupt(host, irq_stat);
1512         spin_unlock(&host->lock);
1513
1514         return ret;
1515 }
1516
1517 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1518 {
1519         struct ata_host *host = dev_instance;
1520         u8 irq_stat;
1521         irqreturn_t ret;
1522
1523         spin_lock(&host->lock);
1524         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1525         ret = nv_do_interrupt(host, irq_stat);
1526         spin_unlock(&host->lock);
1527
1528         return ret;
1529 }
1530
1531 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1532 {
1533         if (sc_reg > SCR_CONTROL)
1534                 return -EINVAL;
1535
1536         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1537         return 0;
1538 }
1539
1540 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1541 {
1542         if (sc_reg > SCR_CONTROL)
1543                 return -EINVAL;
1544
1545         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1546         return 0;
1547 }
1548
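/* nf2 freeze/thaw: each port owns a nibble of the INT_ENABLE register.
 * Freezing clears the port's enable bits; thawing first acks any latched
 * status bits, then re-enables the sources in NV_INT_MASK.  The CK804 and
 * MCP55 variants below follow the same pattern at different registers.
 */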
1549 static void nv_nf2_freeze(struct ata_port *ap)
1550 {
1551         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1552         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1553         u8 mask;
1554
1555         mask = ioread8(scr_addr + NV_INT_ENABLE);
1556         mask &= ~(NV_INT_ALL << shift);
1557         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1558 }
1559
1560 static void nv_nf2_thaw(struct ata_port *ap)
1561 {
1562         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1563         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1564         u8 mask;
1565
1566         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1567
1568         mask = ioread8(scr_addr + NV_INT_ENABLE);
1569         mask |= (NV_INT_MASK << shift);
1570         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1571 }
1572
1573 static void nv_ck804_freeze(struct ata_port *ap)
1574 {
1575         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1576         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1577         u8 mask;
1578
1579         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1580         mask &= ~(NV_INT_ALL << shift);
1581         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1582 }
1583
1584 static void nv_ck804_thaw(struct ata_port *ap)
1585 {
1586         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1587         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588         u8 mask;
1589
1590         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1591
1592         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1593         mask |= (NV_INT_MASK << shift);
1594         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1595 }
1596
1597 static void nv_mcp55_freeze(struct ata_port *ap)
1598 {
1599         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1600         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1601         u32 mask;
1602
1603         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1604
1605         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1606         mask &= ~(NV_INT_ALL_MCP55 << shift);
1607         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1608         ata_bmdma_freeze(ap);
1609 }
1610
1611 static void nv_mcp55_thaw(struct ata_port *ap)
1612 {
1613         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1614         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1615         u32 mask;
1616
1617         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1618
1619         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1620         mask |= (NV_INT_MASK_MCP55 << shift);
1621         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1622         ata_bmdma_thaw(ap);
1623 }
1624
1625 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1626                         unsigned long deadline)
1627 {
1628         unsigned int dummy;
1629
1630         /* SATA hardreset fails to retrieve proper device signature on
1631          * some controllers.  Don't classify on hardreset.  For more
1632          * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
1633          */
1634         return sata_std_hardreset(link, &dummy, deadline);
1635 }
1636
1637 static void nv_error_handler(struct ata_port *ap)
1638 {
1639         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1640                            nv_hardreset, ata_std_postreset);
1641 }
1642
1643 static void nv_adma_error_handler(struct ata_port *ap)
1644 {
1645         struct nv_adma_port_priv *pp = ap->private_data;
1646         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1647                 void __iomem *mmio = pp->ctl_block;
1648                 int i;
1649                 u16 tmp;
1650
1651                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1652                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1653                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1654                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1655                         u32 status = readw(mmio + NV_ADMA_STAT);
1656                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1657                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1658
1659                         ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1660                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1661                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1662                                 notifier, notifier_error, gen_ctl, status,
1663                                 cpb_count, next_cpb_idx);
1664
1665                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1666                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1667                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1668                                     ap->link.sactive & (1 << i))
1669                                         ata_port_printk(ap, KERN_ERR,
1670                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1671                                                 i, cpb->ctl_flags, cpb->resp_flags);
1672                         }
1673                 }
1674
1675                 /* Push us back into port register mode for error handling. */
1676                 nv_adma_register_mode(ap);
1677
1678                 /* Mark all of the CPBs as invalid to prevent them from being executed */
1679                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1680                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1681
1682                 /* clear CPB fetch count */
1683                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1684
1685                 /* Reset channel */
1686                 tmp = readw(mmio + NV_ADMA_CTL);
1687                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1688                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1689                 udelay(1);
1690                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1691                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1692         }
1693
1694         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1695                            nv_hardreset, ata_std_postreset);
1696 }
1697
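/* The defer queue is a small FIFO of tags (plus a shadow bitmap in
 * defer_bits): NCQ commands that cannot be issued while another command is
 * in flight are parked here and re-issued in order later.
 */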
1698 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1699 {
1700         struct nv_swncq_port_priv *pp = ap->private_data;
1701         struct defer_queue *dq = &pp->defer_queue;
1702
1703         /* queue is full */
1704         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1705         dq->defer_bits |= (1 << qc->tag);
1706         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1707 }
1708
1709 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1710 {
1711         struct nv_swncq_port_priv *pp = ap->private_data;
1712         struct defer_queue *dq = &pp->defer_queue;
1713         unsigned int tag;
1714
1715         if (dq->head == dq->tail)       /* null queue */
1716                 return NULL;
1717
1718         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1719         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1720         WARN_ON(!(dq->defer_bits & (1 << tag)));
1721         dq->defer_bits &= ~(1 << tag);
1722
1723         return ata_qc_from_tag(ap, tag);
1724 }
1725
1726 static void nv_swncq_fis_reinit(struct ata_port *ap)
1727 {
1728         struct nv_swncq_port_priv *pp = ap->private_data;
1729
1730         pp->dhfis_bits = 0;
1731         pp->dmafis_bits = 0;
1732         pp->sdbfis_bits = 0;
1733         pp->ncq_flags = 0;
1734 }
1735
1736 static void nv_swncq_pp_reinit(struct ata_port *ap)
1737 {
1738         struct nv_swncq_port_priv *pp = ap->private_data;
1739         struct defer_queue *dq = &pp->defer_queue;
1740
1741         dq->head = 0;
1742         dq->tail = 0;
1743         dq->defer_bits = 0;
1744         pp->qc_active = 0;
1745         pp->last_issue_tag = ATA_TAG_POISON;
1746         nv_swncq_fis_reinit(ap);
1747 }
1748
1749 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1750 {
1751         struct nv_swncq_port_priv *pp = ap->private_data;
1752
1753         writew(fis, pp->irq_block);
1754 }
1755
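/* ata_bmdma_stop() only uses qc->ap, so a dummy qc on the stack is enough
 * to stop the BMDMA engine outside the context of a real command.
 */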
1756 static void __ata_bmdma_stop(struct ata_port *ap)
1757 {
1758         struct ata_queued_cmd qc;
1759
1760         qc.ap = ap;
1761         ata_bmdma_stop(&qc);
1762 }
1763
1764 static void nv_swncq_ncq_stop(struct ata_port *ap)
1765 {
1766         struct nv_swncq_port_priv *pp = ap->private_data;
1767         unsigned int i;
1768         u32 sactive;
1769         u32 done_mask;
1770
1771         ata_port_printk(ap, KERN_ERR,
1772                         "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1773                         ap->qc_active, ap->link.sactive);
1774         ata_port_printk(ap, KERN_ERR,
1775                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1776                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1777                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1778                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1779
1780         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1781                         ap->ops->check_status(ap),
1782                         ioread8(ap->ioaddr.error_addr));
1783
1784         sactive = readl(pp->sactive_block);
1785         done_mask = pp->qc_active ^ sactive;
1786
1787         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1788         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1789                 u8 err = 0;
1790                 if (pp->qc_active & (1 << i))
1791                         err = 0;
1792                 else if (done_mask & (1 << i))
1793                         err = 1;
1794                 else
1795                         continue;
1796
1797                 ata_port_printk(ap, KERN_ERR,
1798                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1799                                 (pp->dhfis_bits >> i) & 0x1,
1800                                 (pp->dmafis_bits >> i) & 0x1,
1801                                 (pp->sdbfis_bits >> i) & 0x1,
1802                                 (sactive >> i) & 0x1,
1803                                 (err ? "error! tag doesn't exist" : " "));
1804         }
1805
1806         nv_swncq_pp_reinit(ap);
1807         ap->ops->irq_clear(ap);
1808         __ata_bmdma_stop(ap);
1809         nv_swncq_irq_clear(ap, 0xffff);
1810 }
1811
1812 static void nv_swncq_error_handler(struct ata_port *ap)
1813 {
1814         struct ata_eh_context *ehc = &ap->link.eh_context;
1815
1816         if (ap->link.sactive) {
1817                 nv_swncq_ncq_stop(ap);
1818                 ehc->i.action |= ATA_EH_HARDRESET;
1819         }
1820
1821         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1822                            nv_hardreset, ata_std_postreset);
1823 }
1824
1825 #ifdef CONFIG_PM
1826 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1827 {
1828         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1829         u32 tmp;
1830
1831         /* clear irq */
1832         writel(~0, mmio + NV_INT_STATUS_MCP55);
1833
1834         /* disable irq */
1835         writel(0, mmio + NV_INT_ENABLE_MCP55);
1836
1837         /* disable swncq */
1838         tmp = readl(mmio + NV_CTL_MCP55);
1839         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1840         writel(tmp, mmio + NV_CTL_MCP55);
1841
1842         return 0;
1843 }
1844
1845 static int nv_swncq_port_resume(struct ata_port *ap)
1846 {
1847         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1848         u32 tmp;
1849
1850         /* clear irq */
1851         writel(~0, mmio + NV_INT_STATUS_MCP55);
1852
1853         /* enable irq */
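        /* each port owns a 16-bit slice of this register, so 0x00fd00fd
         * programs the same 0x00fd enable mask for both ports */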
1854         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1855
1856         /* enable swncq */
1857         tmp = readl(mmio + NV_CTL_MCP55);
1858         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1859
1860         return 0;
1861 }
1862 #endif
1863
1864 static void nv_swncq_host_init(struct ata_host *host)
1865 {
1866         u32 tmp;
1867         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1868         struct pci_dev *pdev = to_pci_dev(host->dev);
1869         u8 regval;
1870
1871         /* disable ECO 398 */
1872         pci_read_config_byte(pdev, 0x7f, &regval);
1873         regval &= ~(1 << 7);
1874         pci_write_config_byte(pdev, 0x7f, regval);
1875
1876         /* enable swncq */
1877         tmp = readl(mmio + NV_CTL_MCP55);
1878         VPRINTK("HOST_CTL:0x%X\n", tmp);
1879         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1880
1881         /* enable irq intr */
1882         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1883         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1884         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1885
1886         /*  clear port irq */
1887         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1888 }
1889
1890 static int nv_swncq_slave_config(struct scsi_device *sdev)
1891 {
1892         struct ata_port *ap = ata_shost_to_port(sdev->host);
1893         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1894         struct ata_device *dev;
1895         int rc;
1896         u8 rev;
1897         u8 check_maxtor = 0;
1898         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1899
1900         rc = ata_scsi_slave_config(sdev);
1901         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1902                 /* Not a proper libata device, ignore */
1903                 return rc;
1904
1905         dev = &ap->link.device[sdev->id];
1906         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1907                 return rc;
1908
1909         /* if MCP51 and Maxtor, then disable ncq */
1910         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1911                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1912                 check_maxtor = 1;
1913
1914         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1915         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1916                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1917                 pci_read_config_byte(pdev, 0x8, &rev);
1918                 if (rev <= 0xa2)
1919                         check_maxtor = 1;
1920         }
1921
1922         if (!check_maxtor)
1923                 return rc;
1924
1925         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1926
1927         if (strncmp(model_num, "Maxtor", 6) == 0) {
1928                 ata_scsi_change_queue_depth(sdev, 1);
1929                 ata_dev_printk(dev, KERN_NOTICE,
1930                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1931         }
1932
1933         return rc;
1934 }
1935
1936 static int nv_swncq_port_start(struct ata_port *ap)
1937 {
1938         struct device *dev = ap->host->dev;
1939         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1940         struct nv_swncq_port_priv *pp;
1941         int rc;
1942
1943         rc = ata_port_start(ap);
1944         if (rc)
1945                 return rc;
1946
1947         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1948         if (!pp)
1949                 return -ENOMEM;
1950
1951         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1952                                       &pp->prd_dma, GFP_KERNEL);
1953         if (!pp->prd)
1954                 return -ENOMEM;
1955         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1956
1957         ap->private_data = pp;
1958         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1959         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1960         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1961
1962         return 0;
1963 }
1964
1965 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1966 {
1967         if (qc->tf.protocol != ATA_PROT_NCQ) {
1968                 ata_qc_prep(qc);
1969                 return;
1970         }
1971
1972         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1973                 return;
1974
1975         nv_swncq_fill_sg(qc);
1976 }
1977
1978 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1979 {
1980         struct ata_port *ap = qc->ap;
1981         struct scatterlist *sg;
1982         unsigned int idx;
1983         struct nv_swncq_port_priv *pp = ap->private_data;
1984         struct ata_prd *prd;
1985
1986         WARN_ON(qc->__sg == NULL);
1987         WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1988
1989         prd = pp->prd + ATA_MAX_PRD * qc->tag;
1990
1991         idx = 0;
1992         ata_for_each_sg(sg, qc) {
1993                 u32 addr, offset;
1994                 u32 sg_len, len;
1995
1996                 addr = (u32)sg_dma_address(sg);
1997                 sg_len = sg_dma_len(sg);
1998
1999                 while (sg_len) {
2000                         offset = addr & 0xffff;
2001                         len = sg_len;
2002                         if ((offset + sg_len) > 0x10000)
2003                                 len = 0x10000 - offset;
2004
2005                         prd[idx].addr = cpu_to_le32(addr);
2006                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2007
2008                         idx++;
2009                         sg_len -= len;
2010                         addr += len;
2011                 }
2012         }
2013
2014         if (idx)
2015                 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2016 }
2017
2018 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2019                                           struct ata_queued_cmd *qc)
2020 {
2021         struct nv_swncq_port_priv *pp = ap->private_data;
2022
2023         if (qc == NULL)
2024                 return 0;
2025
2026         DPRINTK("Enter\n");
2027
2028         writel((1 << qc->tag), pp->sactive_block);
2029         pp->last_issue_tag = qc->tag;
2030         pp->dhfis_bits &= ~(1 << qc->tag);
2031         pp->dmafis_bits &= ~(1 << qc->tag);
2032         pp->qc_active |= (0x1 << qc->tag);
2033
2034         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2035         ap->ops->exec_command(ap, &qc->tf);
2036
2037         DPRINTK("Issued tag %u\n", qc->tag);
2038
2039         return 0;
2040 }
2041
2042 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2043 {
2044         struct ata_port *ap = qc->ap;
2045         struct nv_swncq_port_priv *pp = ap->private_data;
2046
2047         if (qc->tf.protocol != ATA_PROT_NCQ)
2048                 return ata_qc_issue_prot(qc);
2049
2050         DPRINTK("Enter\n");
2051
2052         if (!pp->qc_active)
2053                 nv_swncq_issue_atacmd(ap, qc);
2054         else
2055                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2056
2057         return 0;
2058 }
2059
2060 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2061 {
2062         u32 serror;
2063         struct ata_eh_info *ehi = &ap->link.eh_info;
2064
2065         ata_ehi_clear_desc(ehi);
2066
2067         /* AHCI needs SError cleared; otherwise, it might lock up */
2068         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2069         sata_scr_write(&ap->link, SCR_ERROR, serror);
2070
2071         /* analyze @fis */
2072         if (fis & NV_SWNCQ_IRQ_ADDED)
2073                 ata_ehi_push_desc(ehi, "hot plug");
2074         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2075                 ata_ehi_push_desc(ehi, "hot unplug");
2076
2077         ata_ehi_hotplugged(ehi);
2078
2079         /* okay, let's hand over to EH */
2080         ehi->serror |= serror;
2081
2082         ata_port_freeze(ap);
2083 }
2084
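/* Handle a set-device-bits FIS: complete every tag that has left SActive,
 * then restart anything that stalled.  Returns the number of commands
 * completed, or a negative value to kick error handling.
 */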
2085 static int nv_swncq_sdbfis(struct ata_port *ap)
2086 {
2087         struct ata_queued_cmd *qc;
2088         struct nv_swncq_port_priv *pp = ap->private_data;
2089         struct ata_eh_info *ehi = &ap->link.eh_info;
2090         u32 sactive;
2091         int nr_done = 0;
2092         u32 done_mask;
2093         int i;
2094         u8 host_stat;
2095         u8 lack_dhfis = 0;
2096
2097         host_stat = ap->ops->bmdma_status(ap);
2098         if (unlikely(host_stat & ATA_DMA_ERR)) {
2099                 /* error when transferring data to/from memory */
2100                 ata_ehi_clear_desc(ehi);
2101                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2102                 ehi->err_mask |= AC_ERR_HOST_BUS;
2103                 ehi->action |= ATA_EH_SOFTRESET;
2104                 return -EINVAL;
2105         }
2106
2107         ap->ops->irq_clear(ap);
2108         __ata_bmdma_stop(ap);
2109
2110         sactive = readl(pp->sactive_block);
2111         done_mask = pp->qc_active ^ sactive;
2112
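        /* done_mask holds the tags that changed state; a bit set in both
         * done_mask and sactive is a tag the device claims active but that
         * the driver is not tracking -- an illegal transition */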
2113         if (unlikely(done_mask & sactive)) {
2114                 ata_ehi_clear_desc(ehi);
2115                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2116                                   "(%08x->%08x)", pp->qc_active, sactive);
2117                 ehi->err_mask |= AC_ERR_HSM;
2118                 ehi->action |= ATA_EH_HARDRESET;
2119                 return -EINVAL;
2120         }
2121         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2122                 if (!(done_mask & (1 << i)))
2123                         continue;
2124
2125                 qc = ata_qc_from_tag(ap, i);
2126                 if (qc) {
2127                         ata_qc_complete(qc);
2128                         pp->qc_active &= ~(1 << i);
2129                         pp->dhfis_bits &= ~(1 << i);
2130                         pp->dmafis_bits &= ~(1 << i);
2131                         pp->sdbfis_bits |= (1 << i);
2132                         nr_done++;
2133                 }
2134         }
2135
2136         if (!ap->qc_active) {
2137                 DPRINTK("over\n");
2138                 nv_swncq_pp_reinit(ap);
2139                 return nr_done;
2140         }
2141
2142         if (pp->qc_active & pp->dhfis_bits)
2143                 return nr_done;
2144
2145         if ((pp->ncq_flags & ncq_saw_backout) ||
2146             (pp->qc_active ^ pp->dhfis_bits))
2147                 /* if the controller can't get a device-to-host register FIS,
2148                  * the driver needs to reissue the command.
2149                  */
2150                 lack_dhfis = 1;
2151
2152         DPRINTK("id 0x%x QC: qc_active 0x%x,"
2153                 "SWNCQ:qc_active 0x%X defer_bits %X "
2154                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2155                 ap->print_id, ap->qc_active, pp->qc_active,
2156                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2157                 pp->dmafis_bits, pp->last_issue_tag);
2158
2159         nv_swncq_fis_reinit(ap);
2160
2161         if (lack_dhfis) {
2162                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2163                 nv_swncq_issue_atacmd(ap, qc);
2164                 return nr_done;
2165         }
2166
2167         if (pp->defer_queue.defer_bits) {
2168                 /* send deferral queue command */
2169                 qc = nv_swncq_qc_from_dq(ap);
2170                 WARN_ON(qc == NULL);
2171                 nv_swncq_issue_atacmd(ap, qc);
2172         }
2173
2174         return nr_done;
2175 }
2176
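/* Read back the tag of the command the controller is currently handling;
 * the 5-bit tag field sits at bits 6:2 of the tag register.
 */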
2177 static inline u32 nv_swncq_tag(struct ata_port *ap)
2178 {
2179         struct nv_swncq_port_priv *pp = ap->private_data;
2180         u32 tag;
2181
2182         tag = readb(pp->tag_block) >> 2;
2183         return (tag & 0x1f);
2184 }
2185
2186 static int nv_swncq_dmafis(struct ata_port *ap)
2187 {
2188         struct ata_queued_cmd *qc;
2189         unsigned int rw;
2190         u8 dmactl;
2191         u32 tag;
2192         struct nv_swncq_port_priv *pp = ap->private_data;
2193
2194         __ata_bmdma_stop(ap);
2195         tag = nv_swncq_tag(ap);
2196
2197         DPRINTK("dma setup tag 0x%x\n", tag);
2198         qc = ata_qc_from_tag(ap, tag);
2199
2200         if (unlikely(!qc))
2201                 return 0;
2202
2203         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2204
2205         /* load PRD table addr. */
2206         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2207                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2208
2209         /* specify data direction, triple-check start bit is clear */
2210         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2211         dmactl &= ~ATA_DMA_WR;
2212         if (!rw)
2213                 dmactl |= ATA_DMA_WR;
2214
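        /* start the engine; ATA_DMA_WR makes the BMDMA controller write to
         * memory, i.e. it is set for device-to-host (read) transfers */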
2215         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2216
2217         return 1;
2218 }
2219
2220 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2221 {
2222         struct nv_swncq_port_priv *pp = ap->private_data;
2223         struct ata_queued_cmd *qc;
2224         struct ata_eh_info *ehi = &ap->link.eh_info;
2225         u32 serror;
2226         u8 ata_stat;
2227         int rc = 0;
2228
2229         ata_stat = ap->ops->check_status(ap);
2230         nv_swncq_irq_clear(ap, fis);
2231         if (!fis)
2232                 return;
2233
2234         if (ap->pflags & ATA_PFLAG_FROZEN)
2235                 return;
2236
2237         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2238                 nv_swncq_hotplug(ap, fis);
2239                 return;
2240         }
2241
2242         if (!pp->qc_active)
2243                 return;
2244
2245         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2246                 return;
2247         ap->ops->scr_write(ap, SCR_ERROR, serror);
2248
2249         if (ata_stat & ATA_ERR) {
2250                 ata_ehi_clear_desc(ehi);
2251                 ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2252                 ehi->err_mask |= AC_ERR_DEV;
2253                 ehi->serror |= serror;
2254                 ehi->action |= ATA_EH_SOFTRESET;
2255                 ata_port_freeze(ap);
2256                 return;
2257         }
2258
2259         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2260                 /* If the IRQ signals a backout, the driver must
2261                  * reissue the command some time later.
2262                  */
2263                 pp->ncq_flags |= ncq_saw_backout;
2264         }
2265
2266         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2267                 pp->ncq_flags |= ncq_saw_sdb;
2268                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2269                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2270                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2271                         pp->dmafis_bits, readl(pp->sactive_block));
2272                 rc = nv_swncq_sdbfis(ap);
2273                 if (rc < 0)
2274                         goto irq_error;
2275         }
2276
2277         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2278                 /* The interrupt indicates the new command
2279                  * was transmitted correctly to the drive.
2280                  */
2281                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2282                 pp->ncq_flags |= ncq_saw_d2h;
2283                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2284                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2285                         ehi->err_mask |= AC_ERR_HSM;
2286                         ehi->action |= ATA_EH_HARDRESET;
2287                         goto irq_error;
2288                 }
2289
2290                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2291                     !(pp->ncq_flags & ncq_saw_dmas)) {
2292                         ata_stat = ap->ops->check_status(ap);
2293                         if (ata_stat & ATA_BUSY)
2294                                 goto irq_exit;
2295
2296                         if (pp->defer_queue.defer_bits) {
2297                                 DPRINTK("send next command\n");
2298                                 qc = nv_swncq_qc_from_dq(ap);
2299                                 nv_swncq_issue_atacmd(ap, qc);
2300                         }
2301                 }
2302         }
2303
2304         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2305                 /* program the DMA controller with appropriate PRD buffers
2306                  * and start the DMA transfer for the requested command.
2307                  */
2308                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2309                 pp->ncq_flags |= ncq_saw_dmas;
2310                 rc = nv_swncq_dmafis(ap);
2311         }
2312
2313 irq_exit:
2314         return;
2315 irq_error:
2316         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2317         ata_port_freeze(ap);
2318         return;
2319 }
2320
2321 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2322 {
2323         struct ata_host *host = dev_instance;
2324         unsigned int i;
2325         unsigned int handled = 0;
2326         unsigned long flags;
2327         u32 irq_stat;
2328
2329         spin_lock_irqsave(&host->lock, flags);
2330
2331         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2332
2333         for (i = 0; i < host->n_ports; i++) {
2334                 struct ata_port *ap = host->ports[i];
2335
2336                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2337                         if (ap->link.sactive) {
2338                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2339                                 handled = 1;
2340                         } else {
2341                                 if (irq_stat)   /* preserve hotplug bits */
2342                                         nv_swncq_irq_clear(ap, 0xfff0);
2343
2344                                 handled += nv_host_intr(ap, (u8)irq_stat);
2345                         }
2346                 }
2347                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2348         }
2349
2350         spin_unlock_irqrestore(&host->lock, flags);
2351
2352         return IRQ_RETVAL(handled);
2353 }
2354
2355 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2356 {
2357         static int printed_version = 0;
2358         const struct ata_port_info *ppi[] = { NULL, NULL };
2359         struct ata_host *host;
2360         struct nv_host_priv *hpriv;
2361         int rc;
2362         u32 bar;
2363         void __iomem *base;
2364         unsigned long type = ent->driver_data;
2365
2366         // Make sure this is a SATA controller by counting the number of bars
2367         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2368         // it's an IDE controller and we ignore it.
2369         for (bar = 0; bar < 6; bar++)
2370                 if (pci_resource_start(pdev, bar) == 0)
2371                         return -ENODEV;
2372
2373         if (!printed_version++)
2374                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2375
2376         rc = pcim_enable_device(pdev);
2377         if (rc)
2378                 return rc;
2379
2380         /* determine type and allocate host */
2381         if (type == CK804 && adma_enabled) {
2382                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2383                 type = ADMA;
2384         }
2385
2386         ppi[0] = &nv_port_info[type];
2387         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2388         if (rc)
2389                 return rc;
2390
2391         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2392         if (!hpriv)
2393                 return -ENOMEM;
2394         hpriv->type = type;
2395         host->private_data = hpriv;
2396
2397         /* set 64bit dma masks, may fail */
2398         if (type == ADMA) {
2399                 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
2400                         pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2401         }
2402
2403         /* request and iomap NV_MMIO_BAR */
2404         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2405         if (rc)
2406                 return rc;
2407
2408         /* configure SCR access */
2409         base = host->iomap[NV_MMIO_BAR];
2410         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2411         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2412
2413         /* enable SATA space for CK804 */
2414         if (type >= CK804) {
2415                 u8 regval;
2416
2417                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2418                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2419                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2420         }
2421
2422         /* init ADMA */
2423         if (type == ADMA) {
2424                 rc = nv_adma_host_init(host);
2425                 if (rc)
2426                         return rc;
2427         } else if (type == SWNCQ && swncq_enabled) {
2428                 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2429                 nv_swncq_host_init(host);
2430         }
2431
2432         pci_set_master(pdev);
2433         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2434                                  IRQF_SHARED, ppi[0]->sht);
2435 }
2436
2437 #ifdef CONFIG_PM
2438 static int nv_pci_device_resume(struct pci_dev *pdev)
2439 {
2440         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2441         struct nv_host_priv *hpriv = host->private_data;
2442         int rc;
2443
2444         rc = ata_pci_device_do_resume(pdev);
2445         if (rc)
2446                 return rc;
2447
2448         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2449                 if (hpriv->type >= CK804) {
2450                         u8 regval;
2451
2452                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2453                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2454                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2455                 }
2456                 if (hpriv->type == ADMA) {
2457                         u32 tmp32;
2458                         struct nv_adma_port_priv *pp;
2459                         /* enable/disable ADMA on the ports appropriately */
2460                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2461
2462                         pp = host->ports[0]->private_data;
2463                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2464                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2465                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2466                         else
2467                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2468                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2469                         pp = host->ports[1]->private_data;
2470                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2471                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2472                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2473                         else
2474                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2475                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2476
2477                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2478                 }
2479         }
2480
2481         ata_host_resume(host);
2482
2483         return 0;
2484 }
2485 #endif
2486
2487 static void nv_ck804_host_stop(struct ata_host *host)
2488 {
2489         struct pci_dev *pdev = to_pci_dev(host->dev);
2490         u8 regval;
2491
2492         /* disable SATA space for CK804 */
2493         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2494         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2495         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2496 }
2497
2498 static void nv_adma_host_stop(struct ata_host *host)
2499 {
2500         struct pci_dev *pdev = to_pci_dev(host->dev);
2501         u32 tmp32;
2502
2503         /* disable ADMA on the ports */
2504         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2505         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2506                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2507                    NV_MCP_SATA_CFG_20_PORT1_EN |
2508                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2509
2510         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2511
2512         nv_ck804_host_stop(host);
2513 }
2514
2515 static int __init nv_init(void)
2516 {
2517         return pci_register_driver(&nv_pci_driver);
2518 }
2519
2520 static void __exit nv_exit(void)
2521 {
2522         pci_unregister_driver(&nv_pci_driver);
2523 }
2524
2525 module_init(nv_init);
2526 module_exit(nv_exit);
2527 module_param_named(adma, adma_enabled, bool, 0444);
2528 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2529 module_param_named(swncq, swncq_enabled, bool, 0444);
2530 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2531