/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
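
/* From the constants above: each command tag gets one 128-byte CPB plus
 * an external table of (1024 - 128) / 16 = 56 APRDs (896 bytes), i.e.
 * 1KB per tag, so NV_ADMA_PORT_PRIV_DMA_SZ works out to 32 * 1024 bytes
 * of DMA memory per port.  NV_ADMA_SGTBL_TOTAL_LEN adds the 5 APRDs
 * embedded in the CPB itself, giving 61 usable SG entries per command.
 */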

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};

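/* Each port owns a 12-bit field in the ADMA general control/status
 * register; the interrupt-pending bit for a port is bit 19 + 12*port
 * (bit 19 for port 0, bit 31 for port 1). */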
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
				   unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

static struct ata_port_operations nv_common_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};

/* OSDL bz11195 reports that link doesn't come online after hardreset
 * on generic nv's and there have been several other similar reports
 * on linux-ide.  Disable hardreset for generic nv's.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &nv_common_ops,
	.hardreset		= ATA_OP_NULL,
};

/* nf2 is ripe with hardreset related problems.
 *
 * kernel bz#3352 reports nf2/3 controllers can't determine device
 * signature reliably.  The following thread reports detection failure
 * on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * And bz#12176 reports that hardreset simply doesn't work on nf2.
 * Give up on it and just don't do hardreset.
 */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

/* For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port by
 * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
 * in a somewhat nondeterministic way.  Use noclassify hardreset.
 */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.hardreset		= nv_noclassify_hardreset,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
 * work, hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as far as
 * reset quirkiness is concerned.  Define separate ops for mcp5x with
 * nv_noclassify_hardreset().
 */
static struct ata_port_operations nv_mcp5x_ops = {
	.inherits		= &nv_common_ops,
	.hardreset		= nv_noclassify_hardreset,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_mcp5x_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

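/* NV_PI_PRIV builds a pointer to an anonymous compound literal, letting
 * each nv_port_info[] entry carry its interrupt handler and scsi host
 * template in ->private_data as a single static object. */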
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_mcp5x_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled;
static int swncq_enabled = 1;

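/* Drop the port out of ADMA mode into legacy register mode: poll (up to
 * 20 * 50ns) for the ADMA engine to go idle, clear the GO bit, then
 * poll again until the controller reports legacy mode. */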
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* We have to set the DMA mask to 32-bit if either port is in
		   ATAPI mode, since they are on the same PCI device which is
		   used for DMA mapping.  If we set the mask we also need to set
		   the bounce limit on both ports to ensure that the block
		   layer doesn't feed addresses that cause DMA mapping to
		   choke.  If either SCSI device is not allocated yet, it's OK
		   since that port will discover its correct setting when it
		   does get allocated.
		   Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/* This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   register mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

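/* Encode a taskfile into CPB entries: each 16-bit word holds the shadow
 * register address in the high byte and the value to write in the low
 * byte, plus control bits (WNB on the first entry so the controller
 * waits for not-BSY, CMDEND on the command register write).  Unused
 * entries are padded with IGN up to the 12-entry taskfile area. */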
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_sff_host_intr(ap, qc);
}

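/* ADMA interrupt handler.  For each port: fall back to the legacy
 * handler if ADMA is disabled or the port is in register mode, then
 * read the notifier registers, acknowledge ADMA status, and walk the
 * notifier bits to complete (or fail) individual CPBs.  Both notifier
 * clear registers are written together at the end, as NVIDIA requires. */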
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->link.sactive;
				}

				/* check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_sff_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used as the bounce limit in slave_config later if needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

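	/* The high dword is written as (dma >> 16) >> 16 rather than >> 32
	   so the shift stays well-defined when dma_addr_t is a 32-bit type
	   (shifting a 32-bit value by 32 would be undefined behavior). */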
	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

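/* Build the CPB for a command.  The CPB is first marked not-valid (DONE
 * response flag set, ctl_flags cleared) with write barriers in between,
 * so the controller never observes a partially built CPB; ctl_flags is
 * only set, and resp_flags cleared, once everything else is in place. */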
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_sff_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->sff_check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

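/* Hardreset that never classifies: returning -EAGAIN while the link is
 * online tells libata EH that classification is still needed, so it
 * follows up with another reset (typically softreset) to obtain the
 * device signature these controllers can't report reliably. */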
static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
				   unsigned long deadline)
{
	bool online;
	int rc;

	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				 &online, NULL);
	return online ? -EAGAIN : rc;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

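/*
 * MCP55 keeps its interrupt status/enable state in 32-bit MMIO
 * registers with a wider per-port field (NV_INT_PORT_SHIFT_MCP55),
 * and additionally chains to the generic SFF freeze/thaw helpers.
 */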
static void nv_mcp55_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask &= ~(NV_INT_ALL_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_sff_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask |= (NV_INT_MASK_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_sff_thaw(ap);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                        u32 status = readw(mmio + NV_ADMA_STAT);
                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

                        ata_port_printk(ap, KERN_ERR,
                                "EH in ADMA mode, notifier 0x%X "
                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
                                "next cpb count 0x%X next cpb idx 0x%x\n",
                                notifier, notifier_error, gen_ctl, status,
                                cpb_count, next_cpb_idx);

                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                                struct nv_adma_cpb *cpb = &pp->cpb[i];
                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
                                    ap->link.sactive & (1 << i))
                                        ata_port_printk(ap, KERN_ERR,
                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                                i, cpb->ctl_flags, cpb->resp_flags);
                        }
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                /* Mark all of the CPBs as invalid to prevent them from
                   being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        }

        ata_sff_error_handler(ap);
}

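/*
 * SWNCQ can only feed the drive one command at a time, so commands
 * that arrive while another is in flight are parked in a small ring
 * buffer.  head/tail are free-running counters masked by
 * ATA_MAX_QUEUE - 1; defer_bits mirrors the queued tags as a bitmask
 * so membership tests stay O(1).
 */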
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        /* queue is full */
        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
        dq->defer_bits |= (1 << qc->tag);
        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;
        unsigned int tag;

        if (dq->head == dq->tail)       /* null queue */
                return NULL;

        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
        WARN_ON(!(dq->defer_bits & (1 << tag)));
        dq->defer_bits &= ~(1 << tag);

        return ata_qc_from_tag(ap, tag);
}

static void nv_swncq_fis_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        pp->dhfis_bits = 0;
        pp->dmafis_bits = 0;
        pp->sdbfis_bits = 0;
        pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        dq->head = 0;
        dq->tail = 0;
        dq->defer_bits = 0;
        pp->qc_active = 0;
        pp->last_issue_tag = ATA_TAG_POISON;
        nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        writew(fis, pp->irq_block);
}

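/*
 * ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on the
 * stack is enough to stop the engine when no real command is around.
 */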
static void __ata_bmdma_stop(struct ata_port *ap)
{
        struct ata_queued_cmd qc;

        qc.ap = ap;
        ata_bmdma_stop(&qc);
}

static void nv_swncq_ncq_stop(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        unsigned int i;
        u32 sactive;
        u32 done_mask;

        ata_port_printk(ap, KERN_ERR,
                        "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
                        ap->qc_active, ap->link.sactive);
        ata_port_printk(ap, KERN_ERR,
                "SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n"
                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

        ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
                        ap->ops->sff_check_status(ap),
                        ioread8(ap->ioaddr.error_addr));

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                u8 err = 0;
                if (pp->qc_active & (1 << i))
                        err = 0;
                else if (done_mask & (1 << i))
                        err = 1;
                else
                        continue;

                ata_port_printk(ap, KERN_ERR,
                                "tag 0x%x: %01x %01x %01x %01x %s\n", i,
                                (pp->dhfis_bits >> i) & 0x1,
                                (pp->dmafis_bits >> i) & 0x1,
                                (pp->sdbfis_bits >> i) & 0x1,
                                (sactive >> i) & 0x1,
                                (err ? "error! tag doesn't exist" : " "));
        }

        nv_swncq_pp_reinit(ap);
        ap->ops->sff_irq_clear(ap);
        __ata_bmdma_stop(ap);
        nv_swncq_irq_clear(ap, 0xffff);
}

static void nv_swncq_error_handler(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->link.eh_context;

        if (ap->link.sactive) {
                nv_swncq_ncq_stop(ap);
                ehc->i.action |= ATA_EH_RESET;
        }

        ata_sff_error_handler(ap);
}

#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* disable irq */
        writel(0, mmio + NV_INT_ENABLE_MCP55);

        /* disable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
        writel(tmp, mmio + NV_CTL_MCP55);

        return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* enable irq */
        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        return 0;
}
#endif

static void nv_swncq_host_init(struct ata_host *host)
{
        u32 tmp;
        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable ECO 398 */
        pci_read_config_byte(pdev, 0x7f, &regval);
        regval &= ~(1 << 7);
        pci_write_config_byte(pdev, 0x7f, regval);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        VPRINTK("HOST_CTL:0x%X\n", tmp);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        /* enable irq intr */
        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* clear port irq */
        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}

static int nv_swncq_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        struct ata_device *dev;
        int rc;
        u8 rev;
        u8 check_maxtor = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];

        rc = ata_scsi_slave_config(sdev);
        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        dev = &ap->link.device[sdev->id];
        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
                return rc;

        /* if MCP51 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
                check_maxtor = 1;

        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
                pci_read_config_byte(pdev, 0x8, &rev);
                if (rev <= 0xa2)
                        check_maxtor = 1;
        }

        if (!check_maxtor)
                return rc;

        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        if (strncmp(model_num, "Maxtor", 6) == 0) {
                ata_scsi_change_queue_depth(sdev, 1);
                ata_dev_printk(dev, KERN_NOTICE,
                        "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
        }

        return rc;
}

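/*
 * Unlike plain SFF, SWNCQ needs a PRD table per NCQ tag: one
 * DMA-coherent region of ATA_MAX_QUEUE tables is allocated here
 * and carved up by tag in nv_swncq_fill_sg()/nv_swncq_dmafis().
 */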
static int nv_swncq_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct nv_swncq_port_priv *pp;
        int rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
                                      &pp->prd_dma, GFP_KERNEL);
        if (!pp->prd)
                return -ENOMEM;
        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

        ap->private_data = pp;
        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

        return 0;
}

static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
        if (qc->tf.protocol != ATA_PROT_NCQ) {
                ata_sff_qc_prep(qc);
                return;
        }

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        nv_swncq_fill_sg(qc);
}

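/*
 * Build the PRD table for one tag.  A PRD entry cannot cross a 64KB
 * boundary, so each scatterlist segment is split wherever
 * offset + length would pass 0x10000; a length field of 0 encodes a
 * full 64KB.
 */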
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_prd *prd;
        unsigned int si, idx;

        prd = pp->prd + ATA_MAX_PRD * qc->tag;

        idx = 0;
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                u32 addr, offset;
                u32 sg_len, len;

                addr = (u32)sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        prd[idx].addr = cpu_to_le32(addr);
                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc == NULL)
                return 0;

        DPRINTK("Enter\n");

        writel((1 << qc->tag), pp->sactive_block);
        pp->last_issue_tag = qc->tag;
        pp->dhfis_bits &= ~(1 << qc->tag);
        pp->dmafis_bits &= ~(1 << qc->tag);
        pp->qc_active |= (0x1 << qc->tag);

        ap->ops->sff_tf_load(ap, &qc->tf);      /* load tf registers */
        ap->ops->sff_exec_command(ap, &qc->tf);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}

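/*
 * Only one NCQ command is handed to the drive at a time; anything
 * that arrives while pp->qc_active is non-zero goes to the defer
 * queue and is issued later from the interrupt path.
 */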
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc->tf.protocol != ATA_PROT_NCQ)
                return ata_sff_qc_issue(qc);

        DPRINTK("Enter\n");

        if (!pp->qc_active)
                nv_swncq_issue_atacmd(ap, qc);
        else
                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */

        return 0;
}

static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
        u32 serror;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        /* SError needs clearing; otherwise the port might lock up */
        sata_scr_read(&ap->link, SCR_ERROR, &serror);
        sata_scr_write(&ap->link, SCR_ERROR, serror);

        /* analyze @fis */
        if (fis & NV_SWNCQ_IRQ_ADDED)
                ata_ehi_push_desc(ehi, "hot plug");
        else if (fis & NV_SWNCQ_IRQ_REMOVED)
                ata_ehi_push_desc(ehi, "hot unplug");

        ata_ehi_hotplugged(ehi);

        /* okay, let's hand over to EH */
        ehi->serror |= serror;

        ata_port_freeze(ap);
}

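/*
 * A Set Device Bits FIS reports completions.  Tags that were active
 * in pp->qc_active but have been cleared from SActive by the drive
 * (done_mask = qc_active ^ sactive) are completed here; the tail of
 * the handler then decides whether the last command must be
 * reissued or a deferred one can be sent.
 */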
static int nv_swncq_sdbfis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 sactive;
        int nr_done = 0;
        u32 done_mask;
        int i;
        u8 host_stat;
        u8 lack_dhfis = 0;

        host_stat = ap->ops->bmdma_status(ap);
        if (unlikely(host_stat & ATA_DMA_ERR)) {
                /* error when transferring data to/from memory */
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
                ehi->err_mask |= AC_ERR_HOST_BUS;
                ehi->action |= ATA_EH_RESET;
                return -EINVAL;
        }

        ap->ops->sff_irq_clear(ap);
        __ata_bmdma_stop(ap);

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        if (unlikely(done_mask & sactive)) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
                                  "(%08x->%08x)", pp->qc_active, sactive);
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_RESET;
                return -EINVAL;
        }
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                if (!(done_mask & (1 << i)))
                        continue;

                qc = ata_qc_from_tag(ap, i);
                if (qc) {
                        ata_qc_complete(qc);
                        pp->qc_active &= ~(1 << i);
                        pp->dhfis_bits &= ~(1 << i);
                        pp->dmafis_bits &= ~(1 << i);
                        pp->sdbfis_bits |= (1 << i);
                        nr_done++;
                }
        }

        if (!ap->qc_active) {
                DPRINTK("over\n");
                nv_swncq_pp_reinit(ap);
                return nr_done;
        }

        if (pp->qc_active & pp->dhfis_bits)
                return nr_done;

        if ((pp->ncq_flags & ncq_saw_backout) ||
            (pp->qc_active ^ pp->dhfis_bits))
                /* if the controller can't get a device to host register
                 * FIS, the driver needs to reissue the new command.
                 */
                lack_dhfis = 1;

        DPRINTK("id 0x%x QC: qc_active 0x%x,"
                "SWNCQ:qc_active 0x%X defer_bits %X "
                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
                ap->print_id, ap->qc_active, pp->qc_active,
                pp->defer_queue.defer_bits, pp->dhfis_bits,
                pp->dmafis_bits, pp->last_issue_tag);

        nv_swncq_fis_reinit(ap);

        if (lack_dhfis) {
                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
                nv_swncq_issue_atacmd(ap, qc);
                return nr_done;
        }

        if (pp->defer_queue.defer_bits) {
                /* send deferral queue command */
                qc = nv_swncq_qc_from_dq(ap);
                WARN_ON(qc == NULL);
                nv_swncq_issue_atacmd(ap, qc);
        }

        return nr_done;
}

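/*
 * The per-port tag register appears to hold the tag of the most
 * recent DMA Setup FIS in bits 2-6; it tells the driver which
 * command's PRD table to program into the BMDMA engine.
 */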
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        u32 tag;

        tag = readb(pp->tag_block) >> 2;
        return (tag & 0x1f);
}

static int nv_swncq_dmafis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        unsigned int rw;
        u8 dmactl;
        u32 tag;
        struct nv_swncq_port_priv *pp = ap->private_data;

        __ata_bmdma_stop(ap);
        tag = nv_swncq_tag(ap);

        DPRINTK("dma setup tag 0x%x\n", tag);
        qc = ata_qc_from_tag(ap, tag);

        if (unlikely(!qc))
                return 0;

        rw = qc->tf.flags & ATA_TFLAG_WRITE;

        /* load PRD table addr. */
        iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
                  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~ATA_DMA_WR;
        if (!rw)
                dmactl |= ATA_DMA_WR;

        iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        return 1;
}

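/*
 * Per-port SWNCQ state machine, driven by the latched FIS bits:
 * hotplug events freeze the port, an SDB FIS completes finished
 * tags, a D2H register FIS marks the last issued command as
 * accepted (and may let a deferred command go out), and a DMA
 * Setup FIS triggers programming of the BMDMA engine.
 */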
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_queued_cmd *qc;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 serror;
        u8 ata_stat;
        int rc = 0;

        ata_stat = ap->ops->sff_check_status(ap);
        nv_swncq_irq_clear(ap, fis);
        if (!fis)
                return;

        if (ap->pflags & ATA_PFLAG_FROZEN)
                return;

        if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
                nv_swncq_hotplug(ap, fis);
                return;
        }

        if (!pp->qc_active)
                return;

        if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
                return;
        ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

        if (ata_stat & ATA_ERR) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
                ehi->err_mask |= AC_ERR_DEV;
                ehi->serror |= serror;
                ehi->action |= ATA_EH_RESET;
                ata_port_freeze(ap);
                return;
        }

        if (fis & NV_SWNCQ_IRQ_BACKOUT) {
                /* If a backout IRQ is seen, the driver must reissue
                 * the command some time later.
                 */
                pp->ncq_flags |= ncq_saw_backout;
        }

        if (fis & NV_SWNCQ_IRQ_SDBFIS) {
                pp->ncq_flags |= ncq_saw_sdb;
                DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
                        "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
                        ap->print_id, pp->qc_active, pp->dhfis_bits,
                        pp->dmafis_bits, readl(pp->sactive_block));
                rc = nv_swncq_sdbfis(ap);
                if (rc < 0)
                        goto irq_error;
        }

        if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
                /* The interrupt indicates the new command
                 * was transmitted correctly to the drive.
                 */
                pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
                pp->ncq_flags |= ncq_saw_d2h;
                if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
                        ata_ehi_push_desc(ehi, "illegal fis transaction");
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_RESET;
                        goto irq_error;
                }

                if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
                    !(pp->ncq_flags & ncq_saw_dmas)) {
                        ata_stat = ap->ops->sff_check_status(ap);
                        if (ata_stat & ATA_BUSY)
                                goto irq_exit;

                        if (pp->defer_queue.defer_bits) {
                                DPRINTK("send next command\n");
                                qc = nv_swncq_qc_from_dq(ap);
                                nv_swncq_issue_atacmd(ap, qc);
                        }
                }
        }

        if (fis & NV_SWNCQ_IRQ_DMASETUP) {
                /* program the dma controller with appropriate PRD buffers
                 * and start the DMA transfer for requested command.
                 */
                pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
                pp->ncq_flags |= ncq_saw_dmas;
                rc = nv_swncq_dmafis(ap);
        }

irq_exit:
        return;
irq_error:
        ata_ehi_push_desc(ehi, "fis:0x%x", fis);
        ata_port_freeze(ap);
        return;
}

static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;
        u32 irq_stat;

        spin_lock_irqsave(&host->lock, flags);

        irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        if (ap->link.sactive) {
                                nv_swncq_host_interrupt(ap, (u16)irq_stat);
                                handled = 1;
                        } else {
                                if (irq_stat)   /* reserve Hotplug */
                                        nv_swncq_irq_clear(ap, 0xfff0);

                                handled += nv_host_intr(ap, (u8)irq_stat);
                        }
                }
                irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        const struct ata_port_info *ppi[] = { NULL, NULL };
        struct nv_pi_priv *ipriv;
        struct ata_host *host;
        struct nv_host_priv *hpriv;
        int rc;
        u32 bar;
        void __iomem *base;
        unsigned long type = ent->driver_data;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar = 0; bar < 6; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* determine type and allocate host */
        if (type == CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                type = ADMA;
        } else if (type == MCP5x && swncq_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
                type = SWNCQ;
        }

        ppi[0] = &nv_port_info[type];
        ipriv = ppi[0]->private_data;
        rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
        if (rc)
                return rc;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
        hpriv->type = type;
        host->private_data = hpriv;

        /* request and iomap NV_MMIO_BAR */
        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
        if (rc)
                return rc;

        /* configure SCR access */
        base = host->iomap[NV_MMIO_BAR];
        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                u8 regval;

                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
        }

        /* init ADMA */
        if (type == ADMA) {
                rc = nv_adma_host_init(host);
                if (rc)
                        return rc;
        } else if (type == SWNCQ)
                nv_swncq_host_init(host);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
                                 IRQF_SHARED, ipriv->sht);
}

#ifdef CONFIG_PM
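/*
 * On resume from suspend, the chip has lost the config-space setup
 * done at probe time: the extended SATA register space must be
 * re-enabled on CK804+ parts and, for ADMA, the per-port enable
 * bits must be restored to match each port's ATAPI/ADMA state.
 */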
static int nv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if (hpriv->type >= CK804) {
                        u8 regval;

                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
                }
                if (hpriv->type == ADMA) {
                        u32 tmp32;
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
                                          NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
                                          NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
                }
        }

        ata_host_resume(host);

        return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u32 tmp32;

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
        return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
        pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);

module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");