dma-mapping: add the device argument to dma_mapping_error()
[deliverable/linux.git] / drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS*/
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits*/
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
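
/*
 * Illustrative sketch, not part of the driver: the generic/CK804
 * INT_STATUS register packs one 4-bit field per port (see
 * NV_INT_PORT_SHIFT above).  A per-port view of the NV_INT_* bits can
 * be extracted as below, which is essentially what nv_do_interrupt()
 * later in this file does by shifting irq_stat as it walks the ports.
 * The helper name is made up.
 */
static inline u8 nv_example_port_int_bits(u8 irq_stat, unsigned int port_no)
{
	return (irq_stat >> (port_no * NV_INT_PORT_SHIFT)) & NV_INT_ALL;
}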

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
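
/*
 * Illustrative sketch, not part of the driver: how the per-port DMA
 * area of NV_ADMA_PORT_PRIV_DMA_SZ bytes is carved up.  The first
 * NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ bytes hold one CPB per tag; the
 * remainder holds one NV_ADMA_SGTBL_LEN-entry APRD table per tag for
 * commands with more than five scatter/gather segments (see
 * nv_adma_port_start() and nv_adma_fill_sg() later in this file).
 * The helper names are made up.
 */
static inline unsigned long nv_example_cpb_offset(unsigned int tag)
{
	return tag * NV_ADMA_CPB_SZ;
}

static inline unsigned long nv_example_aprd_offset(unsigned int tag)
{
	return NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ + tag * NV_ADMA_SGTBL_SZ;
}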


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32	dhfis_bits;
	u32	dmafis_bits;
	u32	sdbfis_bits;

	unsigned int	ncq_flags;
};


#define NV_ADMA_CHECK_INTR(GCTL, PORT)	((GCTL) & (1 << (19 + (12 * (PORT)))))

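/*
 * Illustrative sketch, not part of the driver: NV_ADMA_CHECK_INTR()
 * tests the per-port interrupt bit in the ADMA general control/status
 * word read from gen_block + NV_ADMA_GEN_CTL; port 0 uses bit 19 and
 * port 1 uses bit 31 (19 + 12).  Equivalent open-coded form, with a
 * made-up name:
 */
static inline int nv_example_adma_intr_pending(u32 gen_ctl, unsigned int port_no)
{
	return (gen_ctl & (1u << (19 + 12 * port_no))) != 0;
}
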
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.hardreset		= nv_hardreset,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_generic_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled;
static int swncq_enabled = 1;

537static void nv_adma_register_mode(struct ata_port *ap)
538{
2dec7555 539 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 540 void __iomem *mmio = pp->ctl_block;
a2cfe81a
RH
541 u16 tmp, status;
542 int count = 0;
2dec7555
RH
543
544 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
545 return;
546
a2cfe81a 547 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 548 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
a2cfe81a
RH
549 ndelay(50);
550 status = readw(mmio + NV_ADMA_STAT);
551 count++;
552 }
2dcb407e 553 if (count == 20)
a2cfe81a
RH
554 ata_port_printk(ap, KERN_WARNING,
555 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
556 status);
557
2dec7555
RH
558 tmp = readw(mmio + NV_ADMA_CTL);
559 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
560
a2cfe81a
RH
561 count = 0;
562 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 563 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
a2cfe81a
RH
564 ndelay(50);
565 status = readw(mmio + NV_ADMA_STAT);
566 count++;
567 }
2dcb407e 568 if (count == 20)
a2cfe81a
RH
569 ata_port_printk(ap, KERN_WARNING,
570 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
571 status);
572
2dec7555
RH
573 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
574}
575
576static void nv_adma_mode(struct ata_port *ap)
577{
2dec7555 578 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 579 void __iomem *mmio = pp->ctl_block;
a2cfe81a
RH
580 u16 tmp, status;
581 int count = 0;
2dec7555
RH
582
583 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
584 return;
f20b16ff 585
2dec7555
RH
586 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
587
588 tmp = readw(mmio + NV_ADMA_CTL);
589 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
590
a2cfe81a 591 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 592 while (((status & NV_ADMA_STAT_LEGACY) ||
a2cfe81a
RH
593 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
594 ndelay(50);
595 status = readw(mmio + NV_ADMA_STAT);
596 count++;
597 }
2dcb407e 598 if (count == 20)
a2cfe81a
RH
599 ata_port_printk(ap, KERN_WARNING,
600 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
601 status);
602
2dec7555
RH
603 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
604}
605
fbbb262d
RH
606static int nv_adma_slave_config(struct scsi_device *sdev)
607{
608 struct ata_port *ap = ata_shost_to_port(sdev->host);
2dec7555 609 struct nv_adma_port_priv *pp = ap->private_data;
8959d300
RH
610 struct nv_adma_port_priv *port0, *port1;
611 struct scsi_device *sdev0, *sdev1;
2dec7555 612 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
8959d300 613 unsigned long segment_boundary, flags;
fbbb262d
RH
614 unsigned short sg_tablesize;
615 int rc;
2dec7555
RH
616 int adma_enable;
617 u32 current_reg, new_reg, config_mask;
fbbb262d
RH
618
619 rc = ata_scsi_slave_config(sdev);
620
621 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
622 /* Not a proper libata device, ignore */
623 return rc;
624
8959d300
RH
625 spin_lock_irqsave(ap->lock, flags);
626
9af5c9c9 627 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
fbbb262d
RH
628 /*
629 * NVIDIA reports that ADMA mode does not support ATAPI commands.
630 * Therefore ATAPI commands are sent through the legacy interface.
631 * However, the legacy interface only supports 32-bit DMA.
632 * Restrict DMA parameters as required by the legacy interface
633 * when an ATAPI device is connected.
634 */
fbbb262d
RH
635 segment_boundary = ATA_DMA_BOUNDARY;
636 /* Subtract 1 since an extra entry may be needed for padding, see
637 libata-scsi.c */
638 sg_tablesize = LIBATA_MAX_PRD - 1;
f20b16ff 639
2dec7555
RH
640 /* Since the legacy DMA engine is in use, we need to disable ADMA
641 on the port. */
642 adma_enable = 0;
643 nv_adma_register_mode(ap);
2dcb407e 644 } else {
fbbb262d
RH
645 segment_boundary = NV_ADMA_DMA_BOUNDARY;
646 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
2dec7555 647 adma_enable = 1;
fbbb262d 648 }
f20b16ff 649
2dec7555
RH
650 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
651
2dcb407e 652 if (ap->port_no == 1)
2dec7555
RH
653 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
654 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
655 else
656 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
657 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
f20b16ff 658
2dcb407e 659 if (adma_enable) {
2dec7555
RH
660 new_reg = current_reg | config_mask;
661 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
2dcb407e 662 } else {
2dec7555
RH
663 new_reg = current_reg & ~config_mask;
664 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
665 }
f20b16ff 666
2dcb407e 667 if (current_reg != new_reg)
2dec7555 668 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
f20b16ff 669
8959d300
RH
670 port0 = ap->host->ports[0]->private_data;
671 port1 = ap->host->ports[1]->private_data;
672 sdev0 = ap->host->ports[0]->link.device[0].sdev;
673 sdev1 = ap->host->ports[1]->link.device[0].sdev;
674 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
675 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
676 /** We have to set the DMA mask to 32-bit if either port is in
677 ATAPI mode, since they are on the same PCI device which is
678 used for DMA mapping. If we set the mask we also need to set
679 the bounce limit on both ports to ensure that the block
680 layer doesn't feed addresses that cause DMA mapping to
681 choke. If either SCSI device is not allocated yet, it's OK
682 since that port will discover its correct setting when it
683 does get allocated.
684 Note: Setting 32-bit mask should not fail. */
685 if (sdev0)
686 blk_queue_bounce_limit(sdev0->request_queue,
687 ATA_DMA_MASK);
688 if (sdev1)
689 blk_queue_bounce_limit(sdev1->request_queue,
690 ATA_DMA_MASK);
691
692 pci_set_dma_mask(pdev, ATA_DMA_MASK);
693 } else {
694 /** This shouldn't fail as it was set to this value before */
695 pci_set_dma_mask(pdev, pp->adma_dma_mask);
696 if (sdev0)
697 blk_queue_bounce_limit(sdev0->request_queue,
698 pp->adma_dma_mask);
699 if (sdev1)
700 blk_queue_bounce_limit(sdev1->request_queue,
701 pp->adma_dma_mask);
702 }
703
fbbb262d
RH
704 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
705 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
706 ata_port_printk(ap, KERN_INFO,
8959d300
RH
707 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
708 (unsigned long long)*ap->host->dev->dma_mask,
709 segment_boundary, sg_tablesize);
710
711 spin_unlock_irqrestore(ap->lock, flags);
712
fbbb262d
RH
713 return rc;
714}
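
/*
 * Illustrative sketch, not part of the driver: both ports sit behind
 * one PCI function and therefore share one DMA mask.  The rule
 * implemented in nv_adma_slave_config() above amounts to: if either
 * port currently drives an ATAPI device (and so must use the legacy,
 * 32-bit-only engine), the whole device is limited to 32-bit DMA and
 * the block-layer bounce limit is lowered to match; otherwise the
 * wider mask saved in adma_dma_mask is restored.  The helper name is
 * made up.
 */
static inline u64 nv_example_effective_dma_mask(u8 port0_flags, u8 port1_flags,
						u64 adma_dma_mask)
{
	if ((port0_flags | port1_flags) & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return ATA_DMA_MASK;	/* legacy engine: 32-bit only */
	return adma_dma_mask;		/* full ADMA mask */
}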

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
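
/*
 * Illustrative sketch, not part of the driver: each entry that
 * nv_adma_tf_to_cpb() above stores in cpb->tf[] is a little-endian
 * 16-bit word holding the shadow-register index in the high byte and
 * the value in the low byte, optionally OR'ed with control bits such
 * as WNB (wait for not-BSY) on the first entry and CMDEND on the
 * command entry; unused slots are filled with IGN.  For example, the
 * command word for READ DMA EXT (opcode 0x25) would be built as
 * cpu_to_le16((ATA_REG_CMD << 8) | 0x25 | CMDEND).  The helper name
 * below is made up.
 */
static inline __le16 nv_example_cpb_reg_word(u8 reg, u8 val, u16 flags)
{
	return cpu_to_le16(((u16)reg << 8) | val | flags);
}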
767
5bd28a4b 768static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
fbbb262d
RH
769{
770 struct nv_adma_port_priv *pp = ap->private_data;
2dec7555 771 u8 flags = pp->cpb[cpb_num].resp_flags;
fbbb262d
RH
772
773 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
774
5bd28a4b
RH
775 if (unlikely((force_err ||
776 flags & (NV_CPB_RESP_ATA_ERR |
777 NV_CPB_RESP_CMD_ERR |
778 NV_CPB_RESP_CPB_ERR)))) {
9af5c9c9 779 struct ata_eh_info *ehi = &ap->link.eh_info;
5bd28a4b
RH
780 int freeze = 0;
781
782 ata_ehi_clear_desc(ehi);
2dcb407e 783 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
5bd28a4b 784 if (flags & NV_CPB_RESP_ATA_ERR) {
b64bbc39 785 ata_ehi_push_desc(ehi, "ATA error");
5bd28a4b
RH
786 ehi->err_mask |= AC_ERR_DEV;
787 } else if (flags & NV_CPB_RESP_CMD_ERR) {
b64bbc39 788 ata_ehi_push_desc(ehi, "CMD error");
5bd28a4b
RH
789 ehi->err_mask |= AC_ERR_DEV;
790 } else if (flags & NV_CPB_RESP_CPB_ERR) {
b64bbc39 791 ata_ehi_push_desc(ehi, "CPB error");
5bd28a4b
RH
792 ehi->err_mask |= AC_ERR_SYSTEM;
793 freeze = 1;
794 } else {
795 /* notifier error, but no error in CPB flags? */
b64bbc39 796 ata_ehi_push_desc(ehi, "unknown");
5bd28a4b
RH
797 ehi->err_mask |= AC_ERR_OTHER;
798 freeze = 1;
799 }
800 /* Kill all commands. EH will determine what actually failed. */
801 if (freeze)
802 ata_port_freeze(ap);
803 else
804 ata_port_abort(ap);
805 return 1;
fbbb262d 806 }
5bd28a4b 807
f2fb344b 808 if (likely(flags & NV_CPB_RESP_DONE)) {
fbbb262d 809 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
5bd28a4b
RH
810 VPRINTK("CPB flags done, flags=0x%x\n", flags);
811 if (likely(qc)) {
2dcb407e 812 DPRINTK("Completing qc from tag %d\n", cpb_num);
fbbb262d 813 ata_qc_complete(qc);
2a54cf76 814 } else {
9af5c9c9 815 struct ata_eh_info *ehi = &ap->link.eh_info;
2a54cf76
RH
816 /* Notifier bits set without a command may indicate the drive
817 is misbehaving. Raise host state machine violation on this
818 condition. */
5796d1c4
JG
819 ata_port_printk(ap, KERN_ERR,
820 "notifier for tag %d with no cmd?\n",
821 cpb_num);
2a54cf76 822 ehi->err_mask |= AC_ERR_HSM;
cf480626 823 ehi->action |= ATA_EH_RESET;
2a54cf76
RH
824 ata_port_freeze(ap);
825 return 1;
fbbb262d
RH
826 }
827 }
5bd28a4b 828 return 0;
fbbb262d
RH
829}
830
2dec7555
RH
831static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
832{
9af5c9c9 833 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2dec7555
RH
834
835 /* freeze if hotplugged */
836 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
837 ata_port_freeze(ap);
838 return 1;
839 }
840
841 /* bail out if not our interrupt */
842 if (!(irq_stat & NV_INT_DEV))
843 return 0;
844
845 /* DEV interrupt w/ no active qc? */
846 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
9363c382 847 ata_sff_check_status(ap);
2dec7555
RH
848 return 1;
849 }
850
851 /* handle interrupt */
9363c382 852 return ata_sff_host_intr(ap, qc);
2dec7555
RH
853}
854
fbbb262d
RH
855static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
856{
857 struct ata_host *host = dev_instance;
858 int i, handled = 0;
2dec7555 859 u32 notifier_clears[2];
fbbb262d
RH
860
861 spin_lock(&host->lock);
862
863 for (i = 0; i < host->n_ports; i++) {
864 struct ata_port *ap = host->ports[i];
2dec7555 865 notifier_clears[i] = 0;
fbbb262d
RH
866
867 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
868 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 869 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
870 u16 status;
871 u32 gen_ctl;
fbbb262d 872 u32 notifier, notifier_error;
a617c09f 873
53014e25
RH
874 /* if ADMA is disabled, use standard ata interrupt handler */
875 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
876 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
877 >> (NV_INT_PORT_SHIFT * i);
878 handled += nv_host_intr(ap, irq_stat);
879 continue;
880 }
fbbb262d 881
53014e25 882 /* if in ATA register mode, check for standard interrupts */
fbbb262d 883 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
0d5ff566 884 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
2dec7555 885 >> (NV_INT_PORT_SHIFT * i);
2dcb407e 886 if (ata_tag_valid(ap->link.active_tag))
f740d168
RH
887 /** NV_INT_DEV indication seems unreliable at times
888 at least in ADMA mode. Force it on always when a
889 command is active, to prevent losing interrupts. */
890 irq_stat |= NV_INT_DEV;
2dec7555 891 handled += nv_host_intr(ap, irq_stat);
fbbb262d
RH
892 }
893
894 notifier = readl(mmio + NV_ADMA_NOTIFIER);
895 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
2dec7555 896 notifier_clears[i] = notifier | notifier_error;
fbbb262d 897
cdf56bcf 898 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
fbbb262d 899
2dcb407e 900 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
fbbb262d
RH
901 !notifier_error)
902 /* Nothing to do */
903 continue;
904
905 status = readw(mmio + NV_ADMA_STAT);
906
907 /* Clear status. Ensure the controller sees the clearing before we start
908 looking at any of the CPB statuses, so that any CPB completions after
909 this point in the handler will raise another interrupt. */
910 writew(status, mmio + NV_ADMA_STAT);
911 readw(mmio + NV_ADMA_STAT); /* flush posted write */
912 rmb();
913
5bd28a4b
RH
914 handled++; /* irq handled if we got here */
915
916 /* freeze if hotplugged or controller error */
917 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
918 NV_ADMA_STAT_HOTUNPLUG |
5278b50c
RH
919 NV_ADMA_STAT_TIMEOUT |
920 NV_ADMA_STAT_SERROR))) {
9af5c9c9 921 struct ata_eh_info *ehi = &ap->link.eh_info;
5bd28a4b
RH
922
923 ata_ehi_clear_desc(ehi);
2dcb407e 924 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
5bd28a4b
RH
925 if (status & NV_ADMA_STAT_TIMEOUT) {
926 ehi->err_mask |= AC_ERR_SYSTEM;
b64bbc39 927 ata_ehi_push_desc(ehi, "timeout");
5bd28a4b
RH
928 } else if (status & NV_ADMA_STAT_HOTPLUG) {
929 ata_ehi_hotplugged(ehi);
b64bbc39 930 ata_ehi_push_desc(ehi, "hotplug");
5bd28a4b
RH
931 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
932 ata_ehi_hotplugged(ehi);
b64bbc39 933 ata_ehi_push_desc(ehi, "hot unplug");
5278b50c
RH
934 } else if (status & NV_ADMA_STAT_SERROR) {
935 /* let libata analyze SError and figure out the cause */
b64bbc39
TH
936 ata_ehi_push_desc(ehi, "SError");
937 } else
938 ata_ehi_push_desc(ehi, "unknown");
fbbb262d 939 ata_port_freeze(ap);
fbbb262d
RH
940 continue;
941 }
942
5bd28a4b 943 if (status & (NV_ADMA_STAT_DONE |
a1fe7824
RH
944 NV_ADMA_STAT_CPBERR |
945 NV_ADMA_STAT_CMD_COMPLETE)) {
946 u32 check_commands = notifier_clears[i];
721449bf 947 int pos, error = 0;
8ba5e4cb 948
a1fe7824
RH
949 if (status & NV_ADMA_STAT_CPBERR) {
950 /* Check all active commands */
951 if (ata_tag_valid(ap->link.active_tag))
952 check_commands = 1 <<
953 ap->link.active_tag;
954 else
955 check_commands = ap->
956 link.sactive;
957 }
8ba5e4cb 958
fbbb262d 959 /** Check CPBs for completed commands */
721449bf
RH
960 while ((pos = ffs(check_commands)) && !error) {
961 pos--;
962 error = nv_adma_check_cpb(ap, pos,
5796d1c4
JG
963 notifier_error & (1 << pos));
964 check_commands &= ~(1 << pos);
fbbb262d
RH
965 }
966 }
fbbb262d
RH
967 }
968 }
f20b16ff 969
b447916e 970 if (notifier_clears[0] || notifier_clears[1]) {
2dec7555
RH
971 /* Note: Both notifier clear registers must be written
972 if either is set, even if one is zero, according to NVIDIA. */
cdf56bcf
RH
973 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
974 writel(notifier_clears[0], pp->notifier_clear_block);
975 pp = host->ports[1]->private_data;
976 writel(notifier_clears[1], pp->notifier_clear_block);
2dec7555 977 }
fbbb262d
RH
978
979 spin_unlock(&host->lock);
980
981 return IRQ_RETVAL(handled);
982}
983
53014e25
RH
984static void nv_adma_freeze(struct ata_port *ap)
985{
986 struct nv_adma_port_priv *pp = ap->private_data;
987 void __iomem *mmio = pp->ctl_block;
988 u16 tmp;
989
990 nv_ck804_freeze(ap);
991
992 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
993 return;
994
995 /* clear any outstanding CK804 notifications */
2dcb407e 996 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
53014e25
RH
997 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
998
999 /* Disable interrupt */
1000 tmp = readw(mmio + NV_ADMA_CTL);
2dcb407e 1001 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
53014e25 1002 mmio + NV_ADMA_CTL);
5796d1c4 1003 readw(mmio + NV_ADMA_CTL); /* flush posted write */
53014e25
RH
1004}
1005
1006static void nv_adma_thaw(struct ata_port *ap)
1007{
1008 struct nv_adma_port_priv *pp = ap->private_data;
1009 void __iomem *mmio = pp->ctl_block;
1010 u16 tmp;
1011
1012 nv_ck804_thaw(ap);
1013
1014 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1015 return;
1016
1017 /* Enable interrupt */
1018 tmp = readw(mmio + NV_ADMA_CTL);
2dcb407e 1019 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
53014e25 1020 mmio + NV_ADMA_CTL);
5796d1c4 1021 readw(mmio + NV_ADMA_CTL); /* flush posted write */
53014e25
RH
1022}
1023
fbbb262d
RH
1024static void nv_adma_irq_clear(struct ata_port *ap)
1025{
cdf56bcf
RH
1026 struct nv_adma_port_priv *pp = ap->private_data;
1027 void __iomem *mmio = pp->ctl_block;
53014e25 1028 u32 notifier_clears[2];
fbbb262d 1029
53014e25 1030 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
9363c382 1031 ata_sff_irq_clear(ap);
53014e25
RH
1032 return;
1033 }
1034
1035 /* clear any outstanding CK804 notifications */
2dcb407e 1036 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
53014e25 1037 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
fbbb262d 1038
53014e25
RH
1039 /* clear ADMA status */
1040 writew(0xffff, mmio + NV_ADMA_STAT);
a617c09f 1041
53014e25
RH
1042 /* clear notifiers - note both ports need to be written with
1043 something even though we are only clearing on one */
1044 if (ap->port_no == 0) {
1045 notifier_clears[0] = 0xFFFFFFFF;
1046 notifier_clears[1] = 0;
1047 } else {
1048 notifier_clears[0] = 0;
1049 notifier_clears[1] = 0xFFFFFFFF;
1050 }
1051 pp = ap->host->ports[0]->private_data;
1052 writel(notifier_clears[0], pp->notifier_clear_block);
1053 pp = ap->host->ports[1]->private_data;
1054 writel(notifier_clears[1], pp->notifier_clear_block);
fbbb262d
RH
1055}
1056
f5ecac2d 1057static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
fbbb262d 1058{
f5ecac2d 1059 struct nv_adma_port_priv *pp = qc->ap->private_data;
fbbb262d 1060
b447916e 1061 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
9363c382 1062 ata_sff_post_internal_cmd(qc);
fbbb262d
RH
1063}
1064
1065static int nv_adma_port_start(struct ata_port *ap)
1066{
1067 struct device *dev = ap->host->dev;
1068 struct nv_adma_port_priv *pp;
1069 int rc;
1070 void *mem;
1071 dma_addr_t mem_dma;
cdf56bcf 1072 void __iomem *mmio;
8959d300 1073 struct pci_dev *pdev = to_pci_dev(dev);
fbbb262d
RH
1074 u16 tmp;
1075
1076 VPRINTK("ENTER\n");
1077
8959d300
RH
1078 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1079 pad buffers */
1080 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1081 if (rc)
1082 return rc;
1083 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1084 if (rc)
1085 return rc;
1086
fbbb262d
RH
1087 rc = ata_port_start(ap);
1088 if (rc)
1089 return rc;
1090
24dc5f33
TH
1091 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1092 if (!pp)
1093 return -ENOMEM;
fbbb262d 1094
0d5ff566 1095 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
cdf56bcf
RH
1096 ap->port_no * NV_ADMA_PORT_SIZE;
1097 pp->ctl_block = mmio;
0d5ff566 1098 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
cdf56bcf
RH
1099 pp->notifier_clear_block = pp->gen_block +
1100 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1101
8959d300
RH
1102 /* Now that the legacy PRD and padding buffer are allocated we can
1103 safely raise the DMA mask to allocate the CPB/APRD table.
1104 These are allowed to fail since we store the value that ends up
1105 being used to set as the bounce limit in slave_config later if
1106 needed. */
1107 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1108 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1109 pp->adma_dma_mask = *dev->dma_mask;
1110
24dc5f33
TH
1111 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1112 &mem_dma, GFP_KERNEL);
1113 if (!mem)
1114 return -ENOMEM;
fbbb262d
RH
1115 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1116
1117 /*
1118 * First item in chunk of DMA memory:
1119 * 128-byte command parameter block (CPB)
1120 * one for each command tag
1121 */
1122 pp->cpb = mem;
1123 pp->cpb_dma = mem_dma;
1124
1125 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
5796d1c4 1126 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
fbbb262d
RH
1127
1128 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1129 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1130
1131 /*
1132 * Second item: block of ADMA_SGTBL_LEN s/g entries
1133 */
1134 pp->aprd = mem;
1135 pp->aprd_dma = mem_dma;
1136
1137 ap->private_data = pp;
1138
1139 /* clear any outstanding interrupt conditions */
1140 writew(0xffff, mmio + NV_ADMA_STAT);
1141
1142 /* initialize port variables */
1143 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1144
1145 /* clear CPB fetch count */
1146 writew(0, mmio + NV_ADMA_CPB_COUNT);
1147
cdf56bcf 1148 /* clear GO for register mode, enable interrupt */
fbbb262d 1149 tmp = readw(mmio + NV_ADMA_CTL);
5796d1c4
JG
1150 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1151 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
fbbb262d
RH
1152
1153 tmp = readw(mmio + NV_ADMA_CTL);
1154 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
5796d1c4 1155 readw(mmio + NV_ADMA_CTL); /* flush posted write */
fbbb262d
RH
1156 udelay(1);
1157 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
5796d1c4 1158 readw(mmio + NV_ADMA_CTL); /* flush posted write */
fbbb262d
RH
1159
1160 return 0;
fbbb262d
RH
1161}
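
/*
 * Illustrative sketch, not part of the driver: the CPB base address is
 * programmed as two 32-bit halves in nv_adma_port_start() above.  The
 * high half is computed as (dma >> 16) >> 16 rather than (dma >> 32),
 * which keeps the shift well-defined even when dma_addr_t is only
 * 32 bits wide.  The helper name is made up.
 */
static inline u32 nv_example_dma_hi32(dma_addr_t addr)
{
	return (addr >> 16) >> 16;
}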
1162
1163static void nv_adma_port_stop(struct ata_port *ap)
1164{
fbbb262d 1165 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 1166 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
1167
1168 VPRINTK("ENTER\n");
fbbb262d 1169 writew(0, mmio + NV_ADMA_CTL);
fbbb262d
RH
1170}
1171
438ac6d5 1172#ifdef CONFIG_PM
cdf56bcf
RH
1173static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1174{
1175 struct nv_adma_port_priv *pp = ap->private_data;
1176 void __iomem *mmio = pp->ctl_block;
1177
1178 /* Go to register mode - clears GO */
1179 nv_adma_register_mode(ap);
1180
1181 /* clear CPB fetch count */
1182 writew(0, mmio + NV_ADMA_CPB_COUNT);
1183
1184 /* disable interrupt, shut down port */
1185 writew(0, mmio + NV_ADMA_CTL);
1186
1187 return 0;
1188}
1189
1190static int nv_adma_port_resume(struct ata_port *ap)
1191{
1192 struct nv_adma_port_priv *pp = ap->private_data;
1193 void __iomem *mmio = pp->ctl_block;
1194 u16 tmp;
1195
1196 /* set CPB block location */
1197 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
5796d1c4 1198 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
cdf56bcf
RH
1199
1200 /* clear any outstanding interrupt conditions */
1201 writew(0xffff, mmio + NV_ADMA_STAT);
1202
1203 /* initialize port variables */
1204 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1205
1206 /* clear CPB fetch count */
1207 writew(0, mmio + NV_ADMA_CPB_COUNT);
1208
1209 /* clear GO for register mode, enable interrupt */
1210 tmp = readw(mmio + NV_ADMA_CTL);
5796d1c4
JG
1211 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1212 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
cdf56bcf
RH
1213
1214 tmp = readw(mmio + NV_ADMA_CTL);
1215 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
5796d1c4 1216 readw(mmio + NV_ADMA_CTL); /* flush posted write */
cdf56bcf
RH
1217 udelay(1);
1218 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
5796d1c4 1219 readw(mmio + NV_ADMA_CTL); /* flush posted write */
cdf56bcf
RH
1220
1221 return 0;
1222}
438ac6d5 1223#endif
fbbb262d 1224
9a829ccf 1225static void nv_adma_setup_port(struct ata_port *ap)
fbbb262d 1226{
9a829ccf
TH
1227 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1228 struct ata_ioports *ioport = &ap->ioaddr;
fbbb262d
RH
1229
1230 VPRINTK("ENTER\n");
1231
9a829ccf 1232 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
fbbb262d 1233
0d5ff566
TH
1234 ioport->cmd_addr = mmio;
1235 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
fbbb262d 1236 ioport->error_addr =
0d5ff566
TH
1237 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1238 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1239 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1240 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1241 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1242 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
fbbb262d 1243 ioport->status_addr =
0d5ff566 1244 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
fbbb262d 1245 ioport->altstatus_addr =
0d5ff566 1246 ioport->ctl_addr = mmio + 0x20;
fbbb262d
RH
1247}
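
/*
 * Illustrative sketch, not part of the driver: in ADMA mode the legacy
 * taskfile registers are memory-mapped as consecutive 32-bit slots in
 * the per-port ADMA window, so register n lives at port_base + n * 4
 * and the alt-status/device-control register at port_base + 0x20,
 * which is what nv_adma_setup_port() above wires into ap->ioaddr.  The
 * helper name is made up.
 */
static inline void __iomem *nv_example_adma_tf_reg(void __iomem *port_base,
						   unsigned int reg)
{
	return port_base + reg * 4;
}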
1248
9a829ccf 1249static int nv_adma_host_init(struct ata_host *host)
fbbb262d 1250{
9a829ccf 1251 struct pci_dev *pdev = to_pci_dev(host->dev);
fbbb262d
RH
1252 unsigned int i;
1253 u32 tmp32;
1254
1255 VPRINTK("ENTER\n");
1256
1257 /* enable ADMA on the ports */
1258 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1259 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1260 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1261 NV_MCP_SATA_CFG_20_PORT1_EN |
1262 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1263
1264 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1265
9a829ccf
TH
1266 for (i = 0; i < host->n_ports; i++)
1267 nv_adma_setup_port(host->ports[i]);
fbbb262d 1268
fbbb262d
RH
1269 return 0;
1270}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
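
/*
 * Illustrative sketch, not part of the driver: APRD flag selection as
 * done by nv_adma_fill_aprd() above.  The last segment of a command is
 * marked NV_APRD_END; any other segment is marked NV_APRD_CONT except
 * index 4, the last APRD stored inside the CPB itself, presumably
 * because further segments are reached through next_aprd rather than
 * contiguously.  The helper name is made up.
 */
static inline u8 nv_example_aprd_flags(int is_write, int idx, int n_elem)
{
	u8 flags = 0;

	if (is_write)
		flags |= NV_APRD_WRITE;
	if (idx == n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;
	return flags;
}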
1310
382a6652
RH
1311static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1312{
1313 struct nv_adma_port_priv *pp = qc->ap->private_data;
1314
1315 /* ADMA engine can only be used for non-ATAPI DMA commands,
3f3debdb 1316 or interrupt-driven no-data commands. */
b447916e 1317 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
3f3debdb 1318 (qc->tf.flags & ATA_TFLAG_POLLING))
382a6652
RH
1319 return 1;
1320
b447916e 1321 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
382a6652
RH
1322 (qc->tf.protocol == ATA_PROT_NODATA))
1323 return 0;
1324
1325 return 1;
1326}
1327
fbbb262d
RH
1328static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1329{
1330 struct nv_adma_port_priv *pp = qc->ap->private_data;
1331 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1332 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
fbbb262d
RH
1333 NV_CPB_CTL_IEN;
1334
382a6652 1335 if (nv_adma_use_reg_mode(qc)) {
3f3debdb
RH
1336 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1337 (qc->flags & ATA_QCFLAG_DMAMAP));
2dec7555 1338 nv_adma_register_mode(qc->ap);
9363c382 1339 ata_sff_qc_prep(qc);
fbbb262d
RH
1340 return;
1341 }
1342
41949ed5
RH
1343 cpb->resp_flags = NV_CPB_RESP_DONE;
1344 wmb();
1345 cpb->ctl_flags = 0;
1346 wmb();
fbbb262d
RH
1347
1348 cpb->len = 3;
1349 cpb->tag = qc->tag;
1350 cpb->next_cpb_idx = 0;
1351
1352 /* turn on NCQ flags for NCQ commands */
1353 if (qc->tf.protocol == ATA_PROT_NCQ)
1354 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1355
cdf56bcf
RH
1356 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1357
fbbb262d
RH
1358 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1359
b447916e 1360 if (qc->flags & ATA_QCFLAG_DMAMAP) {
382a6652
RH
1361 nv_adma_fill_sg(qc, cpb);
1362 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1363 } else
1364 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
fbbb262d 1365
5796d1c4
JG
1366 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1367 until we are finished filling in all of the contents */
fbbb262d
RH
1368 wmb();
1369 cpb->ctl_flags = ctl_flags;
41949ed5
RH
1370 wmb();
1371 cpb->resp_flags = 0;
fbbb262d
RH
1372}
1373
1374static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1375{
2dec7555 1376 struct nv_adma_port_priv *pp = qc->ap->private_data;
cdf56bcf 1377 void __iomem *mmio = pp->ctl_block;
5e5c74a5 1378 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
fbbb262d
RH
1379
1380 VPRINTK("ENTER\n");
1381
3f3debdb
RH
1382 /* We can't handle result taskfile with NCQ commands, since
1383 retrieving the taskfile switches us out of ADMA mode and would abort
1384 existing commands. */
1385 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1386 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1387 ata_dev_printk(qc->dev, KERN_ERR,
1388 "NCQ w/ RESULT_TF not allowed\n");
1389 return AC_ERR_SYSTEM;
1390 }
1391
382a6652 1392 if (nv_adma_use_reg_mode(qc)) {
fbbb262d 1393 /* use ATA register mode */
382a6652 1394 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
3f3debdb
RH
1395 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1396 (qc->flags & ATA_QCFLAG_DMAMAP));
fbbb262d 1397 nv_adma_register_mode(qc->ap);
9363c382 1398 return ata_sff_qc_issue(qc);
fbbb262d
RH
1399 } else
1400 nv_adma_mode(qc->ap);
1401
1402 /* write append register, command tag in lower 8 bits
1403 and (number of cpbs to append -1) in top 8 bits */
1404 wmb();
5e5c74a5 1405
b447916e 1406 if (curr_ncq != pp->last_issue_ncq) {
5796d1c4
JG
1407 /* Seems to need some delay before switching between NCQ and
1408 non-NCQ commands, else we get command timeouts and such. */
5e5c74a5
RH
1409 udelay(20);
1410 pp->last_issue_ncq = curr_ncq;
1411 }
1412
fbbb262d
RH
1413 writew(qc->tag, mmio + NV_ADMA_APPEND);
1414
5796d1c4 1415 DPRINTK("Issued tag %u\n", qc->tag);
fbbb262d
RH
1416
1417 return 0;
1418}
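
/*
 * Illustrative sketch, not part of the driver: per the comment in
 * nv_adma_qc_issue() above, the APPEND register takes the starting
 * command tag in the low byte and (number of CPBs to append - 1) in
 * the high byte.  The driver only ever appends one CPB, so it writes
 * qc->tag directly; appending n CPBs starting at a given tag would
 * look like the made-up helper below.
 */
static inline u16 nv_example_append_word(u8 first_tag, u8 n_cpbs)
{
	return ((u16)(n_cpbs - 1) << 8) | first_tag;
}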
1419
7d12e780 1420static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1da177e4 1421{
cca3974e 1422 struct ata_host *host = dev_instance;
1da177e4
LT
1423 unsigned int i;
1424 unsigned int handled = 0;
1425 unsigned long flags;
1426
cca3974e 1427 spin_lock_irqsave(&host->lock, flags);
1da177e4 1428
cca3974e 1429 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
1430 struct ata_port *ap;
1431
cca3974e 1432 ap = host->ports[i];
c1389503 1433 if (ap &&
029f5468 1434 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
1435 struct ata_queued_cmd *qc;
1436
9af5c9c9 1437 qc = ata_qc_from_tag(ap, ap->link.active_tag);
e50362ec 1438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
9363c382 1439 handled += ata_sff_host_intr(ap, qc);
b887030a
AC
1440 else
1441 // No request pending? Clear interrupt status
1442 // anyway, in case there's one pending.
5682ed33 1443 ap->ops->sff_check_status(ap);
1da177e4
LT
1444 }
1445
1446 }
1447
cca3974e 1448 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
1449
1450 return IRQ_RETVAL(handled);
1451}
1452
cca3974e 1453static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1454{
1455 int i, handled = 0;
1456
cca3974e
JG
1457 for (i = 0; i < host->n_ports; i++) {
1458 struct ata_port *ap = host->ports[i];
ada364e8
TH
1459
1460 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1461 handled += nv_host_intr(ap, irq_stat);
1462
1463 irq_stat >>= NV_INT_PORT_SHIFT;
1464 }
1465
1466 return IRQ_RETVAL(handled);
1467}
1468
7d12e780 1469static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1470{
cca3974e 1471 struct ata_host *host = dev_instance;
ada364e8
TH
1472 u8 irq_stat;
1473 irqreturn_t ret;
1474
cca3974e 1475 spin_lock(&host->lock);
0d5ff566 1476 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
cca3974e
JG
1477 ret = nv_do_interrupt(host, irq_stat);
1478 spin_unlock(&host->lock);
ada364e8
TH
1479
1480 return ret;
1481}
1482
7d12e780 1483static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1484{
cca3974e 1485 struct ata_host *host = dev_instance;
ada364e8
TH
1486 u8 irq_stat;
1487 irqreturn_t ret;
1488
cca3974e 1489 spin_lock(&host->lock);
0d5ff566 1490 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
cca3974e
JG
1491 ret = nv_do_interrupt(host, irq_stat);
1492 spin_unlock(&host->lock);
ada364e8
TH
1493
1494 return ret;
1495}
1496
da3dbb17 1497static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1da177e4 1498{
1da177e4 1499 if (sc_reg > SCR_CONTROL)
da3dbb17 1500 return -EINVAL;
1da177e4 1501
da3dbb17
TH
1502 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1503 return 0;
1da177e4
LT
1504}
1505
da3dbb17 1506static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1da177e4 1507{
1da177e4 1508 if (sc_reg > SCR_CONTROL)
da3dbb17 1509 return -EINVAL;
1da177e4 1510
0d5ff566 1511 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1512 return 0;
1da177e4
LT
1513}
1514
39f87582
TH
1515static void nv_nf2_freeze(struct ata_port *ap)
1516{
0d5ff566 1517 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1518 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1519 u8 mask;
1520
0d5ff566 1521 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1522 mask &= ~(NV_INT_ALL << shift);
0d5ff566 1523 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1524}
1525
1526static void nv_nf2_thaw(struct ata_port *ap)
1527{
0d5ff566 1528 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1529 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1530 u8 mask;
1531
0d5ff566 1532 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
39f87582 1533
0d5ff566 1534 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1535 mask |= (NV_INT_MASK << shift);
0d5ff566 1536 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1537}
1538
1539static void nv_ck804_freeze(struct ata_port *ap)
1540{
0d5ff566 1541 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1542 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1543 u8 mask;
1544
1545 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1546 mask &= ~(NV_INT_ALL << shift);
1547 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1548}
1549
1550static void nv_ck804_thaw(struct ata_port *ap)
1551{
0d5ff566 1552 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1553 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1554 u8 mask;
1555
1556 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1557
1558 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1559 mask |= (NV_INT_MASK << shift);
1560 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1561}
1562
f140f0f1
KL
1563static void nv_mcp55_freeze(struct ata_port *ap)
1564{
1565 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1566 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1567 u32 mask;
1568
1569 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1570
1571 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1572 mask &= ~(NV_INT_ALL_MCP55 << shift);
1573 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
9363c382 1574 ata_sff_freeze(ap);
f140f0f1
KL
1575}
1576
1577static void nv_mcp55_thaw(struct ata_port *ap)
1578{
1579 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1580 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1581 u32 mask;
1582
1583 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1584
1585 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1586 mask |= (NV_INT_MASK_MCP55 << shift);
1587 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
9363c382 1588 ata_sff_thaw(ap);
f140f0f1
KL
1589}
1590
cc0680a5 1591static int nv_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 1592 unsigned long deadline)
39f87582 1593{
a0b9f4bc 1594 int rc;
39f87582
TH
1595
1596 /* SATA hardreset fails to retrieve proper device signature on
a0b9f4bc
TH
1597 * some controllers. Request follow up SRST. For more info,
1598 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
39f87582 1599 */
a0b9f4bc
TH
1600 rc = sata_sff_hardreset(link, class, deadline);
1601 if (rc)
1602 return rc;
1603 return -EAGAIN;
39f87582
TH
1604}
1605
fbbb262d
RH
1606static void nv_adma_error_handler(struct ata_port *ap)
1607{
1608 struct nv_adma_port_priv *pp = ap->private_data;
b447916e 1609 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
cdf56bcf 1610 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
1611 int i;
1612 u16 tmp;
a84471fe 1613
b447916e 1614 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
2cb27853
RH
1615 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1616 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1617 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1618 u32 status = readw(mmio + NV_ADMA_STAT);
08af7414
RH
1619 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1620 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
2cb27853 1621
5796d1c4
JG
1622 ata_port_printk(ap, KERN_ERR,
1623 "EH in ADMA mode, notifier 0x%X "
08af7414
RH
1624 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1625 "next cpb count 0x%X next cpb idx 0x%x\n",
1626 notifier, notifier_error, gen_ctl, status,
1627 cpb_count, next_cpb_idx);
2cb27853 1628
b447916e 1629 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
2cb27853 1630 struct nv_adma_cpb *cpb = &pp->cpb[i];
b447916e 1631 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
5796d1c4 1632 ap->link.sactive & (1 << i))
2cb27853
RH
1633 ata_port_printk(ap, KERN_ERR,
1634 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1635 i, cpb->ctl_flags, cpb->resp_flags);
1636 }
1637 }
fbbb262d 1638
fbbb262d
RH
1639 /* Push us back into port register mode for error handling. */
1640 nv_adma_register_mode(ap);
1641
5796d1c4
JG
 1642 		/* Mark all of the CPBs as invalid to prevent them from
 1643 		 * being executed */
b447916e 1644 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
fbbb262d
RH
1645 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1646
1647 /* clear CPB fetch count */
1648 writew(0, mmio + NV_ADMA_CPB_COUNT);
1649
1650 /* Reset channel */
1651 tmp = readw(mmio + NV_ADMA_CTL);
1652 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
b447916e 1653 readw(mmio + NV_ADMA_CTL); /* flush posted write */
fbbb262d
RH
1654 udelay(1);
1655 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
b447916e 1656 readw(mmio + NV_ADMA_CTL); /* flush posted write */
fbbb262d
RH
1657 }
1658
9363c382 1659 ata_sff_error_handler(ap);
fbbb262d
RH
1660}
1661
f140f0f1
KL
1662static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1663{
1664 struct nv_swncq_port_priv *pp = ap->private_data;
1665 struct defer_queue *dq = &pp->defer_queue;
1666
1667 /* queue is full */
1668 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
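	/* record the tag both in the defer bitmask and in the circular
	 * tag array; head and tail are free-running indices that wrap
	 * via the ATA_MAX_QUEUE - 1 mask (ATA_MAX_QUEUE is a power of two)
	 */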
1669 dq->defer_bits |= (1 << qc->tag);
1670 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1671}
1672
1673static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1674{
1675 struct nv_swncq_port_priv *pp = ap->private_data;
1676 struct defer_queue *dq = &pp->defer_queue;
1677 unsigned int tag;
1678
1679 if (dq->head == dq->tail) /* null queue */
1680 return NULL;
1681
1682 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1683 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1684 WARN_ON(!(dq->defer_bits & (1 << tag)));
1685 dq->defer_bits &= ~(1 << tag);
1686
1687 return ata_qc_from_tag(ap, tag);
1688}
1689
1690static void nv_swncq_fis_reinit(struct ata_port *ap)
1691{
1692 struct nv_swncq_port_priv *pp = ap->private_data;
1693
1694 pp->dhfis_bits = 0;
1695 pp->dmafis_bits = 0;
1696 pp->sdbfis_bits = 0;
1697 pp->ncq_flags = 0;
1698}
1699
1700static void nv_swncq_pp_reinit(struct ata_port *ap)
1701{
1702 struct nv_swncq_port_priv *pp = ap->private_data;
1703 struct defer_queue *dq = &pp->defer_queue;
1704
1705 dq->head = 0;
1706 dq->tail = 0;
1707 dq->defer_bits = 0;
1708 pp->qc_active = 0;
1709 pp->last_issue_tag = ATA_TAG_POISON;
1710 nv_swncq_fis_reinit(ap);
1711}
1712
1713static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1714{
1715 struct nv_swncq_port_priv *pp = ap->private_data;
1716
1717 writew(fis, pp->irq_block);
1718}
1719
1720static void __ata_bmdma_stop(struct ata_port *ap)
1721{
1722 struct ata_queued_cmd qc;
1723
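	/* ata_bmdma_stop() only needs the port, so a dummy qc on the
	 * stack is enough to stop the BMDMA engine
	 */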
1724 qc.ap = ap;
1725 ata_bmdma_stop(&qc);
1726}
1727
1728static void nv_swncq_ncq_stop(struct ata_port *ap)
1729{
1730 struct nv_swncq_port_priv *pp = ap->private_data;
1731 unsigned int i;
1732 u32 sactive;
1733 u32 done_mask;
1734
1735 ata_port_printk(ap, KERN_ERR,
1736 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1737 ap->qc_active, ap->link.sactive);
1738 ata_port_printk(ap, KERN_ERR,
1739 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1740 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1741 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1742 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1743
1744 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
5682ed33 1745 ap->ops->sff_check_status(ap),
f140f0f1
KL
1746 ioread8(ap->ioaddr.error_addr));
1747
1748 sactive = readl(pp->sactive_block);
1749 done_mask = pp->qc_active ^ sactive;
1750
 1751 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1752 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1753 u8 err = 0;
1754 if (pp->qc_active & (1 << i))
1755 err = 0;
1756 else if (done_mask & (1 << i))
1757 err = 1;
1758 else
1759 continue;
1760
1761 ata_port_printk(ap, KERN_ERR,
1762 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1763 (pp->dhfis_bits >> i) & 0x1,
1764 (pp->dmafis_bits >> i) & 0x1,
1765 (pp->sdbfis_bits >> i) & 0x1,
1766 (sactive >> i) & 0x1,
 1767 			(err ? "error! tag doesn't exist" : " "));
1768 }
1769
1770 nv_swncq_pp_reinit(ap);
5682ed33 1771 ap->ops->sff_irq_clear(ap);
f140f0f1
KL
1772 __ata_bmdma_stop(ap);
1773 nv_swncq_irq_clear(ap, 0xffff);
1774}
1775
1776static void nv_swncq_error_handler(struct ata_port *ap)
1777{
1778 struct ata_eh_context *ehc = &ap->link.eh_context;
1779
1780 if (ap->link.sactive) {
1781 nv_swncq_ncq_stop(ap);
cf480626 1782 ehc->i.action |= ATA_EH_RESET;
f140f0f1
KL
1783 }
1784
9363c382 1785 ata_sff_error_handler(ap);
f140f0f1
KL
1786}
1787
1788#ifdef CONFIG_PM
1789static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1790{
1791 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1792 u32 tmp;
1793
1794 /* clear irq */
1795 writel(~0, mmio + NV_INT_STATUS_MCP55);
1796
1797 /* disable irq */
1798 writel(0, mmio + NV_INT_ENABLE_MCP55);
1799
1800 /* disable swncq */
1801 tmp = readl(mmio + NV_CTL_MCP55);
1802 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1803 writel(tmp, mmio + NV_CTL_MCP55);
1804
1805 return 0;
1806}
1807
1808static int nv_swncq_port_resume(struct ata_port *ap)
1809{
1810 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1811 u32 tmp;
1812
1813 /* clear irq */
1814 writel(~0, mmio + NV_INT_STATUS_MCP55);
1815
1816 /* enable irq */
1817 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1818
1819 /* enable swncq */
1820 tmp = readl(mmio + NV_CTL_MCP55);
1821 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1822
1823 return 0;
1824}
1825#endif
1826
1827static void nv_swncq_host_init(struct ata_host *host)
1828{
1829 u32 tmp;
1830 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1831 struct pci_dev *pdev = to_pci_dev(host->dev);
1832 u8 regval;
1833
1834 /* disable ECO 398 */
1835 pci_read_config_byte(pdev, 0x7f, &regval);
1836 regval &= ~(1 << 7);
1837 pci_write_config_byte(pdev, 0x7f, regval);
1838
1839 /* enable swncq */
1840 tmp = readl(mmio + NV_CTL_MCP55);
1841 VPRINTK("HOST_CTL:0x%X\n", tmp);
1842 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1843
1844 /* enable irq intr */
1845 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1846 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1847 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1848
1849 /* clear port irq */
1850 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1851}
1852
1853static int nv_swncq_slave_config(struct scsi_device *sdev)
1854{
1855 struct ata_port *ap = ata_shost_to_port(sdev->host);
1856 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1857 struct ata_device *dev;
1858 int rc;
1859 u8 rev;
1860 u8 check_maxtor = 0;
1861 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1862
1863 rc = ata_scsi_slave_config(sdev);
1864 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1865 /* Not a proper libata device, ignore */
1866 return rc;
1867
1868 dev = &ap->link.device[sdev->id];
1869 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1870 return rc;
1871
1872 /* if MCP51 and Maxtor, then disable ncq */
1873 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1874 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1875 check_maxtor = 1;
1876
1877 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1878 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1879 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1880 pci_read_config_byte(pdev, 0x8, &rev);
1881 if (rev <= 0xa2)
1882 check_maxtor = 1;
1883 }
1884
1885 if (!check_maxtor)
1886 return rc;
1887
1888 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1889
1890 if (strncmp(model_num, "Maxtor", 6) == 0) {
1891 ata_scsi_change_queue_depth(sdev, 1);
1892 ata_dev_printk(dev, KERN_NOTICE,
1893 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1894 }
1895
1896 return rc;
1897}
1898
1899static int nv_swncq_port_start(struct ata_port *ap)
1900{
1901 struct device *dev = ap->host->dev;
1902 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1903 struct nv_swncq_port_priv *pp;
1904 int rc;
1905
1906 rc = ata_port_start(ap);
1907 if (rc)
1908 return rc;
1909
1910 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1911 if (!pp)
1912 return -ENOMEM;
1913
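	/* allocate one PRD table per possible NCQ tag so that every
	 * queued command has its own scatter/gather list
	 */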
1914 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1915 &pp->prd_dma, GFP_KERNEL);
1916 if (!pp->prd)
1917 return -ENOMEM;
1918 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1919
1920 ap->private_data = pp;
1921 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
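	/* the MCP55 per-port interrupt-status and tag registers are laid
	 * out 2 bytes apart, hence the port_no * 2 offsets below
	 */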
1922 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1923 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1924
1925 return 0;
1926}
1927
1928static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1929{
1930 if (qc->tf.protocol != ATA_PROT_NCQ) {
9363c382 1931 ata_sff_qc_prep(qc);
f140f0f1
KL
1932 return;
1933 }
1934
1935 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1936 return;
1937
1938 nv_swncq_fill_sg(qc);
1939}
1940
1941static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1942{
1943 struct ata_port *ap = qc->ap;
1944 struct scatterlist *sg;
f140f0f1
KL
1945 struct nv_swncq_port_priv *pp = ap->private_data;
1946 struct ata_prd *prd;
ff2aeb1e 1947 unsigned int si, idx;
f140f0f1
KL
1948
1949 prd = pp->prd + ATA_MAX_PRD * qc->tag;
1950
1951 idx = 0;
ff2aeb1e 1952 for_each_sg(qc->sg, sg, qc->n_elem, si) {
f140f0f1
KL
1953 u32 addr, offset;
1954 u32 sg_len, len;
1955
1956 addr = (u32)sg_dma_address(sg);
1957 sg_len = sg_dma_len(sg);
1958
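		/* split the segment so that no single PRD entry crosses a
		 * 64KB boundary; a flags_len field of 0 denotes a full
		 * 64KB transfer
		 */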
1959 while (sg_len) {
1960 offset = addr & 0xffff;
1961 len = sg_len;
1962 if ((offset + sg_len) > 0x10000)
1963 len = 0x10000 - offset;
1964
1965 prd[idx].addr = cpu_to_le32(addr);
1966 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1967
1968 idx++;
1969 sg_len -= len;
1970 addr += len;
1971 }
1972 }
1973
ff2aeb1e 1974 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
f140f0f1
KL
1975}
1976
1977static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1978 struct ata_queued_cmd *qc)
1979{
1980 struct nv_swncq_port_priv *pp = ap->private_data;
1981
1982 if (qc == NULL)
1983 return 0;
1984
1985 DPRINTK("Enter\n");
1986
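	/* mark the tag outstanding in SActive before issuing so that
	 * completion can later be matched against the SDB FIS
	 */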
1987 writel((1 << qc->tag), pp->sactive_block);
1988 pp->last_issue_tag = qc->tag;
1989 pp->dhfis_bits &= ~(1 << qc->tag);
1990 pp->dmafis_bits &= ~(1 << qc->tag);
1991 pp->qc_active |= (0x1 << qc->tag);
1992
5682ed33
TH
1993 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1994 ap->ops->sff_exec_command(ap, &qc->tf);
f140f0f1
KL
1995
1996 DPRINTK("Issued tag %u\n", qc->tag);
1997
1998 return 0;
1999}
2000
2001static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2002{
2003 struct ata_port *ap = qc->ap;
2004 struct nv_swncq_port_priv *pp = ap->private_data;
2005
2006 if (qc->tf.protocol != ATA_PROT_NCQ)
9363c382 2007 return ata_sff_qc_issue(qc);
f140f0f1
KL
2008
2009 DPRINTK("Enter\n");
2010
2011 if (!pp->qc_active)
2012 nv_swncq_issue_atacmd(ap, qc);
2013 else
2014 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2015
2016 return 0;
2017}
2018
2019static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2020{
2021 u32 serror;
2022 struct ata_eh_info *ehi = &ap->link.eh_info;
2023
2024 ata_ehi_clear_desc(ehi);
2025
2026 /* AHCI needs SError cleared; otherwise, it might lock up */
2027 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2028 sata_scr_write(&ap->link, SCR_ERROR, serror);
2029
2030 /* analyze @irq_stat */
2031 if (fis & NV_SWNCQ_IRQ_ADDED)
2032 ata_ehi_push_desc(ehi, "hot plug");
2033 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2034 ata_ehi_push_desc(ehi, "hot unplug");
2035
2036 ata_ehi_hotplugged(ehi);
2037
2038 /* okay, let's hand over to EH */
2039 ehi->serror |= serror;
2040
2041 ata_port_freeze(ap);
2042}
2043
2044static int nv_swncq_sdbfis(struct ata_port *ap)
2045{
2046 struct ata_queued_cmd *qc;
2047 struct nv_swncq_port_priv *pp = ap->private_data;
2048 struct ata_eh_info *ehi = &ap->link.eh_info;
2049 u32 sactive;
2050 int nr_done = 0;
2051 u32 done_mask;
2052 int i;
2053 u8 host_stat;
2054 u8 lack_dhfis = 0;
2055
2056 host_stat = ap->ops->bmdma_status(ap);
2057 if (unlikely(host_stat & ATA_DMA_ERR)) {
 2058 		/* error when transferring data to/from memory */
2059 ata_ehi_clear_desc(ehi);
2060 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2061 ehi->err_mask |= AC_ERR_HOST_BUS;
cf480626 2062 ehi->action |= ATA_EH_RESET;
f140f0f1
KL
2063 return -EINVAL;
2064 }
2065
5682ed33 2066 ap->ops->sff_irq_clear(ap);
f140f0f1
KL
2067 __ata_bmdma_stop(ap);
2068
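	/* tags set in qc_active but already cleared in SActive have
	 * completed; a bit set in SActive that is not in qc_active would
	 * be an illegal transition and is caught below
	 */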
2069 sactive = readl(pp->sactive_block);
2070 done_mask = pp->qc_active ^ sactive;
2071
2072 if (unlikely(done_mask & sactive)) {
2073 ata_ehi_clear_desc(ehi);
 2074 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
 2075 			"(%08x->%08x)", pp->qc_active, sactive);
2076 ehi->err_mask |= AC_ERR_HSM;
cf480626 2077 ehi->action |= ATA_EH_RESET;
f140f0f1
KL
2078 return -EINVAL;
2079 }
2080 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2081 if (!(done_mask & (1 << i)))
2082 continue;
2083
2084 qc = ata_qc_from_tag(ap, i);
2085 if (qc) {
2086 ata_qc_complete(qc);
2087 pp->qc_active &= ~(1 << i);
2088 pp->dhfis_bits &= ~(1 << i);
2089 pp->dmafis_bits &= ~(1 << i);
2090 pp->sdbfis_bits |= (1 << i);
2091 nr_done++;
2092 }
2093 }
2094
2095 if (!ap->qc_active) {
2096 DPRINTK("over\n");
2097 nv_swncq_pp_reinit(ap);
2098 return nr_done;
2099 }
2100
2101 if (pp->qc_active & pp->dhfis_bits)
2102 return nr_done;
2103
2104 if ((pp->ncq_flags & ncq_saw_backout) ||
2105 (pp->qc_active ^ pp->dhfis_bits))
 2106 		/* if the controller can't get a device-to-host register FIS,
 2107 		 * the driver needs to reissue the new command.
2108 */
2109 lack_dhfis = 1;
2110
2111 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2112 "SWNCQ:qc_active 0x%X defer_bits %X "
2113 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2114 ap->print_id, ap->qc_active, pp->qc_active,
2115 pp->defer_queue.defer_bits, pp->dhfis_bits,
2116 pp->dmafis_bits, pp->last_issue_tag);
2117
2118 nv_swncq_fis_reinit(ap);
2119
2120 if (lack_dhfis) {
2121 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2122 nv_swncq_issue_atacmd(ap, qc);
2123 return nr_done;
2124 }
2125
2126 if (pp->defer_queue.defer_bits) {
2127 /* send deferral queue command */
2128 qc = nv_swncq_qc_from_dq(ap);
2129 WARN_ON(qc == NULL);
2130 nv_swncq_issue_atacmd(ap, qc);
2131 }
2132
2133 return nr_done;
2134}
2135
2136static inline u32 nv_swncq_tag(struct ata_port *ap)
2137{
2138 struct nv_swncq_port_priv *pp = ap->private_data;
2139 u32 tag;
2140
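	/* the controller reports the tag for the pending DMA setup in
	 * bits 6:2 of the tag register
	 */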
2141 tag = readb(pp->tag_block) >> 2;
2142 return (tag & 0x1f);
2143}
2144
2145static int nv_swncq_dmafis(struct ata_port *ap)
2146{
2147 struct ata_queued_cmd *qc;
2148 unsigned int rw;
2149 u8 dmactl;
2150 u32 tag;
2151 struct nv_swncq_port_priv *pp = ap->private_data;
2152
2153 __ata_bmdma_stop(ap);
2154 tag = nv_swncq_tag(ap);
2155
2156 DPRINTK("dma setup tag 0x%x\n", tag);
2157 qc = ata_qc_from_tag(ap, tag);
2158
2159 if (unlikely(!qc))
2160 return 0;
2161
2162 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2163
2164 /* load PRD table addr. */
2165 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2166 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2167
2168 /* specify data direction, triple-check start bit is clear */
2169 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2170 dmactl &= ~ATA_DMA_WR;
2171 if (!rw)
2172 dmactl |= ATA_DMA_WR;
2173
2174 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2175
2176 return 1;
2177}
2178
2179static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2180{
2181 struct nv_swncq_port_priv *pp = ap->private_data;
2182 struct ata_queued_cmd *qc;
2183 struct ata_eh_info *ehi = &ap->link.eh_info;
2184 u32 serror;
2185 u8 ata_stat;
2186 int rc = 0;
2187
5682ed33 2188 ata_stat = ap->ops->sff_check_status(ap);
f140f0f1
KL
2189 nv_swncq_irq_clear(ap, fis);
2190 if (!fis)
2191 return;
2192
2193 if (ap->pflags & ATA_PFLAG_FROZEN)
2194 return;
2195
2196 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2197 nv_swncq_hotplug(ap, fis);
2198 return;
2199 }
2200
2201 if (!pp->qc_active)
2202 return;
2203
2204 if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2205 return;
2206 ap->ops->scr_write(ap, SCR_ERROR, serror);
2207
2208 if (ata_stat & ATA_ERR) {
2209 ata_ehi_clear_desc(ehi);
 2210 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2211 ehi->err_mask |= AC_ERR_DEV;
2212 ehi->serror |= serror;
cf480626 2213 ehi->action |= ATA_EH_RESET;
f140f0f1
KL
2214 ata_port_freeze(ap);
2215 return;
2216 }
2217
2218 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
 2219 		/* If a backout interrupt is received, the driver must
 2220 		 * issue the command again some time later.
2221 */
2222 pp->ncq_flags |= ncq_saw_backout;
2223 }
2224
2225 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2226 pp->ncq_flags |= ncq_saw_sdb;
2227 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2228 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2229 ap->print_id, pp->qc_active, pp->dhfis_bits,
2230 pp->dmafis_bits, readl(pp->sactive_block));
2231 rc = nv_swncq_sdbfis(ap);
2232 if (rc < 0)
2233 goto irq_error;
2234 }
2235
2236 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2237 /* The interrupt indicates the new command
2238 * was transmitted correctly to the drive.
2239 */
2240 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2241 pp->ncq_flags |= ncq_saw_d2h;
2242 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2243 ata_ehi_push_desc(ehi, "illegal fis transaction");
2244 ehi->err_mask |= AC_ERR_HSM;
cf480626 2245 ehi->action |= ATA_EH_RESET;
f140f0f1
KL
2246 goto irq_error;
2247 }
2248
2249 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2250 !(pp->ncq_flags & ncq_saw_dmas)) {
5682ed33 2251 ata_stat = ap->ops->sff_check_status(ap);
f140f0f1
KL
2252 if (ata_stat & ATA_BUSY)
2253 goto irq_exit;
2254
2255 if (pp->defer_queue.defer_bits) {
2256 DPRINTK("send next command\n");
2257 qc = nv_swncq_qc_from_dq(ap);
2258 nv_swncq_issue_atacmd(ap, qc);
2259 }
2260 }
2261 }
2262
2263 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
 2264 		/* program the DMA controller with appropriate PRD buffers
 2265 		 * and start the DMA transfer for the requested command.
2266 */
2267 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2268 pp->ncq_flags |= ncq_saw_dmas;
2269 rc = nv_swncq_dmafis(ap);
2270 }
2271
2272irq_exit:
2273 return;
2274irq_error:
2275 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2276 ata_port_freeze(ap);
2277 return;
2278}
2279
2280static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2281{
2282 struct ata_host *host = dev_instance;
2283 unsigned int i;
2284 unsigned int handled = 0;
2285 unsigned long flags;
2286 u32 irq_stat;
2287
2288 spin_lock_irqsave(&host->lock, flags);
2289
2290 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2291
2292 for (i = 0; i < host->n_ports; i++) {
2293 struct ata_port *ap = host->ports[i];
2294
2295 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2296 if (ap->link.sactive) {
2297 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2298 handled = 1;
2299 } else {
 2300 				if (irq_stat)	/* preserve hotplug bits */
2301 nv_swncq_irq_clear(ap, 0xfff0);
2302
2303 handled += nv_host_intr(ap, (u8)irq_stat);
2304 }
2305 }
2306 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2307 }
2308
2309 spin_unlock_irqrestore(&host->lock, flags);
2310
2311 return IRQ_RETVAL(handled);
2312}
2313
5796d1c4 2314static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 2315{
5796d1c4 2316 static int printed_version;
1626aeb8 2317 const struct ata_port_info *ppi[] = { NULL, NULL };
95947193 2318 struct nv_pi_priv *ipriv;
9a829ccf 2319 struct ata_host *host;
cdf56bcf 2320 struct nv_host_priv *hpriv;
1da177e4
LT
2321 int rc;
2322 u32 bar;
0d5ff566 2323 void __iomem *base;
fbbb262d 2324 unsigned long type = ent->driver_data;
1da177e4
LT
2325
2326 // Make sure this is a SATA controller by counting the number of bars
2327 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2328 // it's an IDE controller and we ignore it.
5796d1c4 2329 for (bar = 0; bar < 6; bar++)
1da177e4
LT
2330 if (pci_resource_start(pdev, bar) == 0)
2331 return -ENODEV;
2332
cdf56bcf 2333 if (!printed_version++)
a9524a76 2334 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1da177e4 2335
24dc5f33 2336 rc = pcim_enable_device(pdev);
1da177e4 2337 if (rc)
24dc5f33 2338 return rc;
1da177e4 2339
9a829ccf 2340 /* determine type and allocate host */
f140f0f1 2341 if (type == CK804 && adma_enabled) {
fbbb262d
RH
2342 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2343 type = ADMA;
fbbb262d
RH
2344 }
2345
360737a9
JG
2346 if (type == SWNCQ) {
2347 if (swncq_enabled)
2348 dev_printk(KERN_NOTICE, &pdev->dev,
2349 "Using SWNCQ mode\n");
2350 else
2351 type = GENERIC;
2352 }
2353
1626aeb8 2354 ppi[0] = &nv_port_info[type];
95947193 2355 ipriv = ppi[0]->private_data;
9363c382 2356 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
9a829ccf
TH
2357 if (rc)
2358 return rc;
1da177e4 2359
24dc5f33 2360 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
cdf56bcf 2361 if (!hpriv)
24dc5f33 2362 return -ENOMEM;
9a829ccf
TH
2363 hpriv->type = type;
2364 host->private_data = hpriv;
cdf56bcf 2365
9a829ccf
TH
2366 /* request and iomap NV_MMIO_BAR */
2367 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2368 if (rc)
2369 return rc;
1da177e4 2370
9a829ccf
TH
2371 /* configure SCR access */
2372 base = host->iomap[NV_MMIO_BAR];
2373 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2374 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1da177e4 2375
ada364e8 2376 /* enable SATA space for CK804 */
fbbb262d 2377 if (type >= CK804) {
ada364e8
TH
2378 u8 regval;
2379
2380 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2381 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2382 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2383 }
2384
9a829ccf 2385 /* init ADMA */
fbbb262d 2386 if (type == ADMA) {
9a829ccf 2387 rc = nv_adma_host_init(host);
fbbb262d 2388 if (rc)
24dc5f33 2389 return rc;
360737a9 2390 } else if (type == SWNCQ)
f140f0f1 2391 nv_swncq_host_init(host);
fbbb262d 2392
9a829ccf 2393 pci_set_master(pdev);
95947193
TH
2394 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2395 IRQF_SHARED, ipriv->sht);
1da177e4
LT
2396}
2397
438ac6d5 2398#ifdef CONFIG_PM
cdf56bcf
RH
2399static int nv_pci_device_resume(struct pci_dev *pdev)
2400{
2401 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2402 struct nv_host_priv *hpriv = host->private_data;
ce053fa8 2403 int rc;
cdf56bcf 2404
ce053fa8 2405 rc = ata_pci_device_do_resume(pdev);
b447916e 2406 if (rc)
ce053fa8 2407 return rc;
cdf56bcf
RH
2408
2409 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
b447916e 2410 if (hpriv->type >= CK804) {
cdf56bcf
RH
2411 u8 regval;
2412
2413 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2414 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2415 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2416 }
b447916e 2417 if (hpriv->type == ADMA) {
cdf56bcf
RH
2418 u32 tmp32;
2419 struct nv_adma_port_priv *pp;
2420 /* enable/disable ADMA on the ports appropriately */
2421 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2422
2423 pp = host->ports[0]->private_data;
b447916e 2424 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
cdf56bcf 2425 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
5796d1c4 2426 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
cdf56bcf
RH
2427 else
2428 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
5796d1c4 2429 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
cdf56bcf 2430 pp = host->ports[1]->private_data;
b447916e 2431 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
cdf56bcf 2432 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
5796d1c4 2433 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
cdf56bcf
RH
2434 else
2435 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
5796d1c4 2436 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
cdf56bcf
RH
2437
2438 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2439 }
2440 }
2441
2442 ata_host_resume(host);
2443
2444 return 0;
2445}
438ac6d5 2446#endif
cdf56bcf 2447
cca3974e 2448static void nv_ck804_host_stop(struct ata_host *host)
ada364e8 2449{
cca3974e 2450 struct pci_dev *pdev = to_pci_dev(host->dev);
ada364e8
TH
2451 u8 regval;
2452
2453 /* disable SATA space for CK804 */
2454 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2455 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2456 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
ada364e8
TH
2457}
2458
fbbb262d
RH
2459static void nv_adma_host_stop(struct ata_host *host)
2460{
2461 struct pci_dev *pdev = to_pci_dev(host->dev);
fbbb262d
RH
2462 u32 tmp32;
2463
fbbb262d
RH
2464 /* disable ADMA on the ports */
2465 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2466 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2467 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2468 NV_MCP_SATA_CFG_20_PORT1_EN |
2469 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2470
2471 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2472
2473 nv_ck804_host_stop(host);
2474}
2475
1da177e4
LT
2476static int __init nv_init(void)
2477{
b7887196 2478 return pci_register_driver(&nv_pci_driver);
1da177e4
LT
2479}
2480
2481static void __exit nv_exit(void)
2482{
2483 pci_unregister_driver(&nv_pci_driver);
2484}
2485
2486module_init(nv_init);
2487module_exit(nv_exit);
fbbb262d
RH
2488module_param_named(adma, adma_enabled, bool, 0444);
2489MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
f140f0f1 2490module_param_named(swncq, swncq_enabled, bool, 0444);
d21279f4 2491MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
f140f0f1 2492