/* drivers/ata/sata_nv.c — as of commit "libata: convert to iomap" */
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
aa7e16d6
JG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
1da177e4 21 *
af36d7f0
JG
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
fbbb262d
RH
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
1da177e4
LT
37 */
38
1da177e4
LT
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

/* ADMA scatter/gather segments must not cross a 4GB boundary. */
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
1da177e4 55
10ad05df 56enum {
0d5ff566
TH
57 NV_MMIO_BAR = 5,
58
10ad05df
JG
59 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
1da177e4 65
27e4b274 66 /* INT_STATUS/ENABLE */
10ad05df 67 NV_INT_STATUS = 0x10,
10ad05df 68 NV_INT_ENABLE = 0x11,
27e4b274 69 NV_INT_STATUS_CK804 = 0x440,
10ad05df 70 NV_INT_ENABLE_CK804 = 0x441,
1da177e4 71
27e4b274
TH
72 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
39f87582 80 NV_INT_ALL = 0x0f,
5a44efff
TH
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
39f87582 83
27e4b274 84 /* INT_CONFIG */
10ad05df
JG
85 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
1da177e4 87
10ad05df
JG
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
fbbb262d
RH
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
2dec7555 170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
fbbb262d
RH
171
172};
173
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* DMA address of segment */
	__le32			len;
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
182
/* Control bits encoded in the high byte of each CPB taskfile entry. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
192
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;	/* 0  - NV_CPB_RESP_* bits */
	u8			reserved1;	/* 1 */
	u8			ctl_flags;	/* 2  - NV_CPB_CTL_* bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;		/* 4 */
	u8			next_cpb_idx;	/* 5 */
	__le16			reserved2;	/* 6-7 */
	__le16			tf[12];		/* 8-31 */
	struct nv_adma_prd	aprd[5];	/* 32-111 */
	__le64			next_aprd;	/* 112-119 */
	__le64			reserved3;	/* 120-127 */
};
1da177e4 211
fbbb262d
RH
212
/* Per-port private state for ADMA operation. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array, one per tag */
	dma_addr_t		cpb_dma;	/* bus address of cpb */
	struct nv_adma_prd	*aprd;		/* external APRD (SG) pool */
	dma_addr_t		aprd_dma;	/* bus address of aprd */
	void __iomem		*ctl_block;	/* this port's ADMA registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;	/* this port's notifier-clear reg */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
};
223
cdf56bcf
RH
/* Per-host private state: which nv_host_type this controller is. */
struct nv_host_priv {
	unsigned long		type;
};
227
fbbb262d
RH
/* Nonzero if the ADMA general control register reports an interrupt for PORT. */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
229
1da177e4 230static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
cdf56bcf
RH
231static void nv_remove_one (struct pci_dev *pdev);
232static int nv_pci_device_resume(struct pci_dev *pdev);
cca3974e 233static void nv_ck804_host_stop(struct ata_host *host);
7d12e780
DH
234static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
235static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
236static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
1da177e4
LT
237static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
238static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
1da177e4 239
39f87582
TH
240static void nv_nf2_freeze(struct ata_port *ap);
241static void nv_nf2_thaw(struct ata_port *ap);
242static void nv_ck804_freeze(struct ata_port *ap);
243static void nv_ck804_thaw(struct ata_port *ap);
244static void nv_error_handler(struct ata_port *ap);
fbbb262d 245static int nv_adma_slave_config(struct scsi_device *sdev);
2dec7555 246static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
fbbb262d
RH
247static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
248static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
249static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
250static void nv_adma_irq_clear(struct ata_port *ap);
251static int nv_adma_port_start(struct ata_port *ap);
252static void nv_adma_port_stop(struct ata_port *ap);
cdf56bcf
RH
253static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
254static int nv_adma_port_resume(struct ata_port *ap);
fbbb262d
RH
255static void nv_adma_error_handler(struct ata_port *ap);
256static void nv_adma_host_stop(struct ata_host *host);
257static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
258static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
259static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
260static u8 nv_adma_bmdma_status(struct ata_port *ap);
39f87582 261
1da177e4
LT
/* Controller flavor; used as index into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
270
3b7d697d 271static const struct pci_device_id nv_pci_tbl[] = {
54bb3a94
JG
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
273 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
274 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
275 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
276 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
277 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
278 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
279 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
280 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
281 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
282 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
283 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
284 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
285 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
1da177e4
LT
286 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
287 PCI_ANY_ID, PCI_ANY_ID,
288 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
541134cf
DD
289 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
290 PCI_ANY_ID, PCI_ANY_ID,
291 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
2d2744fc
JG
292
293 { } /* terminate list */
1da177e4
LT
294};
295
1da177e4
LT
/* PCI driver glue: probe/remove plus power-management hooks. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};
304
193515d5 305static struct scsi_host_template nv_sht = {
1da177e4
LT
306 .module = THIS_MODULE,
307 .name = DRV_NAME,
308 .ioctl = ata_scsi_ioctl,
309 .queuecommand = ata_scsi_queuecmd,
1da177e4
LT
310 .can_queue = ATA_DEF_QUEUE,
311 .this_id = ATA_SHT_THIS_ID,
312 .sg_tablesize = LIBATA_MAX_PRD,
1da177e4
LT
313 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
314 .emulated = ATA_SHT_EMULATED,
315 .use_clustering = ATA_SHT_USE_CLUSTERING,
316 .proc_name = DRV_NAME,
317 .dma_boundary = ATA_DMA_BOUNDARY,
318 .slave_configure = ata_scsi_slave_config,
ccf68c34 319 .slave_destroy = ata_scsi_slave_destroy,
1da177e4 320 .bios_param = ata_std_bios_param,
cdf56bcf
RH
321 .suspend = ata_scsi_device_suspend,
322 .resume = ata_scsi_device_resume,
1da177e4
LT
323};
324
fbbb262d
RH
/* SCSI host template for ADMA mode: deeper queue, ADMA SG limits, and a
   custom slave_configure to handle the ATAPI/legacy fallback. */
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
344
ada364e8 345static const struct ata_port_operations nv_generic_ops = {
1da177e4
LT
346 .port_disable = ata_port_disable,
347 .tf_load = ata_tf_load,
348 .tf_read = ata_tf_read,
349 .exec_command = ata_exec_command,
350 .check_status = ata_check_status,
351 .dev_select = ata_std_dev_select,
1da177e4
LT
352 .bmdma_setup = ata_bmdma_setup,
353 .bmdma_start = ata_bmdma_start,
354 .bmdma_stop = ata_bmdma_stop,
355 .bmdma_status = ata_bmdma_status,
356 .qc_prep = ata_qc_prep,
357 .qc_issue = ata_qc_issue_prot,
39f87582
TH
358 .freeze = ata_bmdma_freeze,
359 .thaw = ata_bmdma_thaw,
360 .error_handler = nv_error_handler,
361 .post_internal_cmd = ata_bmdma_post_internal_cmd,
0d5ff566 362 .data_xfer = ata_data_xfer,
ada364e8 363 .irq_handler = nv_generic_interrupt,
1da177e4
LT
364 .irq_clear = ata_bmdma_irq_clear,
365 .scr_read = nv_scr_read,
366 .scr_write = nv_scr_write,
367 .port_start = ata_port_start,
1da177e4
LT
368};
369
ada364e8
TH
/* Port operations for nForce2/3: as generic, but with NF2-specific
   freeze/thaw and interrupt handler. */
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
394
/* Port operations for CK804/MCP04 in legacy mode: CK804-specific
   freeze/thaw/interrupt and a host_stop hook. */
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
420
fbbb262d
RH
/* Port operations for ADMA mode (CK804/MCP04): ADMA qc prep/issue,
   ADMA interrupt handling, and PM hooks for the ADMA engine. */
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};
450
ada364e8
TH
/* Per-flavor port configuration, indexed by enum nv_host_type. */
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
494
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module-wide switch: nonzero selects ADMA mode on capable controllers.
   NOTE(review): no writer is visible in this chunk — presumably set at
   build time or by a module parameter elsewhere; confirm against probe. */
static int adma_enabled = 1;
502
2dec7555
RH
503static void nv_adma_register_mode(struct ata_port *ap)
504{
2dec7555 505 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 506 void __iomem *mmio = pp->ctl_block;
2dec7555
RH
507 u16 tmp;
508
509 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
510 return;
511
512 tmp = readw(mmio + NV_ADMA_CTL);
513 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
514
515 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
516}
517
518static void nv_adma_mode(struct ata_port *ap)
519{
2dec7555 520 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 521 void __iomem *mmio = pp->ctl_block;
2dec7555
RH
522 u16 tmp;
523
524 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
525 return;
f20b16ff 526
2dec7555
RH
527 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
528
529 tmp = readw(mmio + NV_ADMA_CTL);
530 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
531
532 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
533}
534
fbbb262d
RH
/*
 * Configure a SCSI device attached to an ADMA port.  ATAPI devices are
 * driven through the legacy interface (32-bit DMA only), so their queue
 * limits are restricted and ADMA is disabled in PCI config space for
 * the port; ATA devices get the full ADMA limits.  Returns the result
 * of ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* Each port has its own enable + posted-write-buffer enable bits. */
	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* Avoid a PCI config write when nothing changed. */
	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
608
2dec7555
RH
609static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
610{
611 struct nv_adma_port_priv *pp = qc->ap->private_data;
612 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
613}
614
615static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
fbbb262d
RH
616{
617 unsigned int idx = 0;
618
619 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
620
621 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
622 cpb[idx++] = cpu_to_le16(IGN);
623 cpb[idx++] = cpu_to_le16(IGN);
624 cpb[idx++] = cpu_to_le16(IGN);
625 cpb[idx++] = cpu_to_le16(IGN);
626 cpb[idx++] = cpu_to_le16(IGN);
627 }
628 else {
629 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
630 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
631 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
632 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
633 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
634 }
635 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
636 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
637 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
638 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
639 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
640
641 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
642
643 return idx;
644}
645
fbbb262d
RH
/*
 * Inspect one CPB's response flags and, if it completed (or force_err
 * is set, e.g. on a global controller error), complete the matching
 * queued command with an appropriate err_mask.
 */
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}
694
2dec7555
RH
/*
 * Service a legacy (register-mode) interrupt for one port, given the
 * already-shifted per-port irq_stat bits.  Returns nonzero if the
 * interrupt was handled (or the port was frozen), zero if it was not
 * ours.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* status read also acks the interrupt on the device */
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
718
fbbb262d
RH
/*
 * Shared interrupt handler for ADMA-capable hosts.  For each port:
 * ports in register mode are delegated to nv_host_intr(); ports in ADMA
 * mode have their notifier/status registers read and cleared, hotplug
 * events freeze the port, and completed/errored CPBs are reaped.  Both
 * notifier-clear registers are written at the end if either port had
 * notifier bits set (hardware requirement, per NVIDIA).
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					/* NCQ: walk every outstanding tag in sactive */
					int pos;
					u32 active = ap->sactive;
					while( (pos = ffs(active)) ) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
824
/*
 * Clear all pending interrupt state for the port: ADMA status and
 * notifier bits, plus the legacy BMDMA status register.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status (write-back of the bits just read) */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/** clear legacy status (write-1-to-clear) */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}
842
/*
 * Legacy BMDMA setup, used while the port is in register mode:
 * program the PRD table address and transfer direction, then issue the
 * taskfile.  Calling this in ADMA mode is a bug (WARN_ON).
 */
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* ATA_DMA_WR means "DMA engine writes to memory", i.e. a device read */
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
869
/*
 * Legacy BMDMA start, used while the port is in register mode: set the
 * START bit in the BMDMA command register.  Calling this in ADMA mode
 * is a bug (WARN_ON).
 */
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
886
/*
 * Legacy BMDMA stop: clear the START bit.  Silently does nothing when
 * the port is in ADMA mode (also used as post_internal_cmd, which may
 * run in either mode).
 */
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
902
2dec7555 903static u8 nv_adma_bmdma_status(struct ata_port *ap)
fbbb262d 904{
fbbb262d 905 struct nv_adma_port_priv *pp = ap->private_data;
fbbb262d 906
2dec7555 907 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
fbbb262d 908
0d5ff566 909 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
fbbb262d
RH
910}
911
912static int nv_adma_port_start(struct ata_port *ap)
913{
914 struct device *dev = ap->host->dev;
915 struct nv_adma_port_priv *pp;
916 int rc;
917 void *mem;
918 dma_addr_t mem_dma;
cdf56bcf 919 void __iomem *mmio;
fbbb262d
RH
920 u16 tmp;
921
922 VPRINTK("ENTER\n");
923
924 rc = ata_port_start(ap);
925 if (rc)
926 return rc;
927
24dc5f33
TH
928 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
929 if (!pp)
930 return -ENOMEM;
fbbb262d 931
0d5ff566 932 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
cdf56bcf
RH
933 ap->port_no * NV_ADMA_PORT_SIZE;
934 pp->ctl_block = mmio;
0d5ff566 935 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
cdf56bcf
RH
936 pp->notifier_clear_block = pp->gen_block +
937 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
938
24dc5f33
TH
939 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
940 &mem_dma, GFP_KERNEL);
941 if (!mem)
942 return -ENOMEM;
fbbb262d
RH
943 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
944
945 /*
946 * First item in chunk of DMA memory:
947 * 128-byte command parameter block (CPB)
948 * one for each command tag
949 */
950 pp->cpb = mem;
951 pp->cpb_dma = mem_dma;
952
953 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
954 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
955
956 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
957 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
958
959 /*
960 * Second item: block of ADMA_SGTBL_LEN s/g entries
961 */
962 pp->aprd = mem;
963 pp->aprd_dma = mem_dma;
964
965 ap->private_data = pp;
966
967 /* clear any outstanding interrupt conditions */
968 writew(0xffff, mmio + NV_ADMA_STAT);
969
970 /* initialize port variables */
971 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
972
973 /* clear CPB fetch count */
974 writew(0, mmio + NV_ADMA_CPB_COUNT);
975
cdf56bcf 976 /* clear GO for register mode, enable interrupt */
fbbb262d 977 tmp = readw(mmio + NV_ADMA_CTL);
cdf56bcf 978 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
fbbb262d
RH
979
980 tmp = readw(mmio + NV_ADMA_CTL);
981 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
982 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
983 udelay(1);
984 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
985 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
986
987 return 0;
fbbb262d
RH
988}
989
990static void nv_adma_port_stop(struct ata_port *ap)
991{
fbbb262d 992 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 993 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
994
995 VPRINTK("ENTER\n");
fbbb262d 996 writew(0, mmio + NV_ADMA_CTL);
fbbb262d
RH
997}
998
cdf56bcf
RH
999static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1000{
1001 struct nv_adma_port_priv *pp = ap->private_data;
1002 void __iomem *mmio = pp->ctl_block;
1003
1004 /* Go to register mode - clears GO */
1005 nv_adma_register_mode(ap);
1006
1007 /* clear CPB fetch count */
1008 writew(0, mmio + NV_ADMA_CPB_COUNT);
1009
1010 /* disable interrupt, shut down port */
1011 writew(0, mmio + NV_ADMA_CTL);
1012
1013 return 0;
1014}
1015
1016static int nv_adma_port_resume(struct ata_port *ap)
1017{
1018 struct nv_adma_port_priv *pp = ap->private_data;
1019 void __iomem *mmio = pp->ctl_block;
1020 u16 tmp;
1021
1022 /* set CPB block location */
1023 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1024 writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1025
1026 /* clear any outstanding interrupt conditions */
1027 writew(0xffff, mmio + NV_ADMA_STAT);
1028
1029 /* initialize port variables */
1030 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1031
1032 /* clear CPB fetch count */
1033 writew(0, mmio + NV_ADMA_CPB_COUNT);
1034
1035 /* clear GO for register mode, enable interrupt */
1036 tmp = readw(mmio + NV_ADMA_CTL);
1037 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1038
1039 tmp = readw(mmio + NV_ADMA_CTL);
1040 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1041 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1042 udelay(1);
1043 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1044 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1045
1046 return 0;
1047}
fbbb262d
RH
1048
1049static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1050{
0d5ff566 1051 void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
fbbb262d
RH
1052 struct ata_ioports *ioport = &probe_ent->port[port];
1053
1054 VPRINTK("ENTER\n");
1055
1056 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1057
0d5ff566
TH
1058 ioport->cmd_addr = mmio;
1059 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
fbbb262d 1060 ioport->error_addr =
0d5ff566
TH
1061 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1062 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1063 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1064 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1065 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1066 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
fbbb262d 1067 ioport->status_addr =
0d5ff566 1068 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
fbbb262d 1069 ioport->altstatus_addr =
0d5ff566 1070 ioport->ctl_addr = mmio + 0x20;
fbbb262d
RH
1071}
1072
1073static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1074{
1075 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1076 unsigned int i;
1077 u32 tmp32;
1078
1079 VPRINTK("ENTER\n");
1080
1081 /* enable ADMA on the ports */
1082 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1083 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1084 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1085 NV_MCP_SATA_CFG_20_PORT1_EN |
1086 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1087
1088 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1089
1090 for (i = 0; i < probe_ent->n_ports; i++)
1091 nv_adma_setup_port(probe_ent, i);
1092
fbbb262d
RH
1093 return 0;
1094}
1095
1096static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1097 struct scatterlist *sg,
1098 int idx,
1099 struct nv_adma_prd *aprd)
1100{
2dec7555 1101 u8 flags;
fbbb262d
RH
1102
1103 memset(aprd, 0, sizeof(struct nv_adma_prd));
1104
1105 flags = 0;
1106 if (qc->tf.flags & ATA_TFLAG_WRITE)
1107 flags |= NV_APRD_WRITE;
1108 if (idx == qc->n_elem - 1)
1109 flags |= NV_APRD_END;
1110 else if (idx != 4)
1111 flags |= NV_APRD_CONT;
1112
1113 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1114 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
2dec7555 1115 aprd->flags = flags;
fbbb262d
RH
1116}
1117
1118static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1119{
1120 struct nv_adma_port_priv *pp = qc->ap->private_data;
1121 unsigned int idx;
1122 struct nv_adma_prd *aprd;
1123 struct scatterlist *sg;
1124
1125 VPRINTK("ENTER\n");
1126
1127 idx = 0;
1128
1129 ata_for_each_sg(sg, qc) {
1130 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1131 nv_adma_fill_aprd(qc, sg, idx, aprd);
1132 idx++;
1133 }
1134 if (idx > 5)
1135 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1136}
1137
1138static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1139{
1140 struct nv_adma_port_priv *pp = qc->ap->private_data;
1141 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1142 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1143 NV_CPB_CTL_APRD_VALID |
1144 NV_CPB_CTL_IEN;
1145
fbbb262d 1146 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
2dec7555
RH
1147 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1148 nv_adma_register_mode(qc->ap);
fbbb262d
RH
1149 ata_qc_prep(qc);
1150 return;
1151 }
1152
1153 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1154
1155 cpb->len = 3;
1156 cpb->tag = qc->tag;
1157 cpb->next_cpb_idx = 0;
1158
1159 /* turn on NCQ flags for NCQ commands */
1160 if (qc->tf.protocol == ATA_PROT_NCQ)
1161 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1162
cdf56bcf
RH
1163 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1164
fbbb262d
RH
1165 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1166
1167 nv_adma_fill_sg(qc, cpb);
1168
1169 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1170 finished filling in all of the contents */
1171 wmb();
1172 cpb->ctl_flags = ctl_flags;
1173}
1174
1175static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1176{
2dec7555 1177 struct nv_adma_port_priv *pp = qc->ap->private_data;
cdf56bcf 1178 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
1179
1180 VPRINTK("ENTER\n");
1181
1182 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
2dec7555 1183 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
fbbb262d
RH
1184 /* use ATA register mode */
1185 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1186 nv_adma_register_mode(qc->ap);
1187 return ata_qc_issue_prot(qc);
1188 } else
1189 nv_adma_mode(qc->ap);
1190
1191 /* write append register, command tag in lower 8 bits
1192 and (number of cpbs to append -1) in top 8 bits */
1193 wmb();
1194 writew(qc->tag, mmio + NV_ADMA_APPEND);
1195
1196 DPRINTK("Issued tag %u\n",qc->tag);
1197
1198 return 0;
1199}
1200
7d12e780 1201static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1da177e4 1202{
cca3974e 1203 struct ata_host *host = dev_instance;
1da177e4
LT
1204 unsigned int i;
1205 unsigned int handled = 0;
1206 unsigned long flags;
1207
cca3974e 1208 spin_lock_irqsave(&host->lock, flags);
1da177e4 1209
cca3974e 1210 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
1211 struct ata_port *ap;
1212
cca3974e 1213 ap = host->ports[i];
c1389503 1214 if (ap &&
029f5468 1215 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
1216 struct ata_queued_cmd *qc;
1217
1218 qc = ata_qc_from_tag(ap, ap->active_tag);
e50362ec 1219 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1da177e4 1220 handled += ata_host_intr(ap, qc);
b887030a
AC
1221 else
1222 // No request pending? Clear interrupt status
1223 // anyway, in case there's one pending.
1224 ap->ops->check_status(ap);
1da177e4
LT
1225 }
1226
1227 }
1228
cca3974e 1229 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
1230
1231 return IRQ_RETVAL(handled);
1232}
1233
cca3974e 1234static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1235{
1236 int i, handled = 0;
1237
cca3974e
JG
1238 for (i = 0; i < host->n_ports; i++) {
1239 struct ata_port *ap = host->ports[i];
ada364e8
TH
1240
1241 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1242 handled += nv_host_intr(ap, irq_stat);
1243
1244 irq_stat >>= NV_INT_PORT_SHIFT;
1245 }
1246
1247 return IRQ_RETVAL(handled);
1248}
1249
7d12e780 1250static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1251{
cca3974e 1252 struct ata_host *host = dev_instance;
ada364e8
TH
1253 u8 irq_stat;
1254 irqreturn_t ret;
1255
cca3974e 1256 spin_lock(&host->lock);
0d5ff566 1257 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
cca3974e
JG
1258 ret = nv_do_interrupt(host, irq_stat);
1259 spin_unlock(&host->lock);
ada364e8
TH
1260
1261 return ret;
1262}
1263
7d12e780 1264static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1265{
cca3974e 1266 struct ata_host *host = dev_instance;
ada364e8
TH
1267 u8 irq_stat;
1268 irqreturn_t ret;
1269
cca3974e 1270 spin_lock(&host->lock);
0d5ff566 1271 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
cca3974e
JG
1272 ret = nv_do_interrupt(host, irq_stat);
1273 spin_unlock(&host->lock);
ada364e8
TH
1274
1275 return ret;
1276}
1277
1da177e4
LT
1278static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1279{
1da177e4
LT
1280 if (sc_reg > SCR_CONTROL)
1281 return 0xffffffffU;
1282
0d5ff566 1283 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1284}
1285
1286static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1287{
1da177e4
LT
1288 if (sc_reg > SCR_CONTROL)
1289 return;
1290
0d5ff566 1291 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1da177e4
LT
1292}
1293
39f87582
TH
1294static void nv_nf2_freeze(struct ata_port *ap)
1295{
0d5ff566 1296 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1297 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1298 u8 mask;
1299
0d5ff566 1300 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1301 mask &= ~(NV_INT_ALL << shift);
0d5ff566 1302 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1303}
1304
1305static void nv_nf2_thaw(struct ata_port *ap)
1306{
0d5ff566 1307 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1308 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1309 u8 mask;
1310
0d5ff566 1311 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
39f87582 1312
0d5ff566 1313 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1314 mask |= (NV_INT_MASK << shift);
0d5ff566 1315 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1316}
1317
1318static void nv_ck804_freeze(struct ata_port *ap)
1319{
0d5ff566 1320 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1321 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1322 u8 mask;
1323
1324 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1325 mask &= ~(NV_INT_ALL << shift);
1326 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1327}
1328
1329static void nv_ck804_thaw(struct ata_port *ap)
1330{
0d5ff566 1331 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1332 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1333 u8 mask;
1334
1335 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1336
1337 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1338 mask |= (NV_INT_MASK << shift);
1339 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1340}
1341
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy_class;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy_class);
}
1352
1353static void nv_error_handler(struct ata_port *ap)
1354{
1355 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1356 nv_hardreset, ata_std_postreset);
1357}
1358
fbbb262d
RH
1359static void nv_adma_error_handler(struct ata_port *ap)
1360{
1361 struct nv_adma_port_priv *pp = ap->private_data;
1362 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
cdf56bcf 1363 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
1364 int i;
1365 u16 tmp;
1366
1367 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1368 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
cdf56bcf 1369 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
fbbb262d
RH
1370 u32 status = readw(mmio + NV_ADMA_STAT);
1371
1372 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1373 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1374 notifier, notifier_error, gen_ctl, status);
1375
1376 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1377 struct nv_adma_cpb *cpb = &pp->cpb[i];
1378 if( cpb->ctl_flags || cpb->resp_flags )
1379 ata_port_printk(ap, KERN_ERR,
1380 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1381 i, cpb->ctl_flags, cpb->resp_flags);
1382 }
1383
1384 /* Push us back into port register mode for error handling. */
1385 nv_adma_register_mode(ap);
1386
1387 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1388
1389 /* Mark all of the CPBs as invalid to prevent them from being executed */
1390 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1391 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1392
1393 /* clear CPB fetch count */
1394 writew(0, mmio + NV_ADMA_CPB_COUNT);
1395
1396 /* Reset channel */
1397 tmp = readw(mmio + NV_ADMA_CTL);
1398 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1399 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1400 udelay(1);
1401 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1402 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1403 }
1404
1405 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1406 nv_hardreset, ata_std_postreset);
1407}
1408
1da177e4
LT
1409static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1410{
1411 static int printed_version = 0;
29da9f6d 1412 struct ata_port_info *ppi[2];
1da177e4 1413 struct ata_probe_ent *probe_ent;
cdf56bcf 1414 struct nv_host_priv *hpriv;
1da177e4
LT
1415 int rc;
1416 u32 bar;
0d5ff566 1417 void __iomem *base;
fbbb262d
RH
1418 unsigned long type = ent->driver_data;
1419 int mask_set = 0;
1da177e4
LT
1420
1421 // Make sure this is a SATA controller by counting the number of bars
1422 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1423 // it's an IDE controller and we ignore it.
1424 for (bar=0; bar<6; bar++)
1425 if (pci_resource_start(pdev, bar) == 0)
1426 return -ENODEV;
1427
cdf56bcf 1428 if (!printed_version++)
a9524a76 1429 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1da177e4 1430
24dc5f33 1431 rc = pcim_enable_device(pdev);
1da177e4 1432 if (rc)
24dc5f33 1433 return rc;
1da177e4
LT
1434
1435 rc = pci_request_regions(pdev, DRV_NAME);
1436 if (rc) {
24dc5f33
TH
1437 pcim_pin_device(pdev);
1438 return rc;
1da177e4
LT
1439 }
1440
fbbb262d
RH
1441 if(type >= CK804 && adma_enabled) {
1442 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1443 type = ADMA;
1444 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1445 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1446 mask_set = 1;
1447 }
1448
1449 if(!mask_set) {
1450 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1451 if (rc)
24dc5f33 1452 return rc;
fbbb262d
RH
1453 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1454 if (rc)
24dc5f33 1455 return rc;
fbbb262d 1456 }
1da177e4
LT
1457
1458 rc = -ENOMEM;
1459
24dc5f33 1460 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
cdf56bcf 1461 if (!hpriv)
24dc5f33 1462 return -ENOMEM;
cdf56bcf 1463
fbbb262d 1464 ppi[0] = ppi[1] = &nv_port_info[type];
29da9f6d 1465 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1da177e4 1466 if (!probe_ent)
24dc5f33
TH
1467 return -ENOMEM;
1468
0d5ff566 1469 if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
24dc5f33 1470 return -EIO;
0d5ff566 1471 probe_ent->iomap = pcim_iomap_table(pdev);
1da177e4 1472
cdf56bcf
RH
1473 probe_ent->private_data = hpriv;
1474 hpriv->type = type;
1da177e4 1475
0d5ff566 1476 base = probe_ent->iomap[NV_MMIO_BAR];
02cbd926
JG
1477 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1478 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1da177e4 1479
ada364e8 1480 /* enable SATA space for CK804 */
fbbb262d 1481 if (type >= CK804) {
ada364e8
TH
1482 u8 regval;
1483
1484 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1485 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1486 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1487 }
1488
1da177e4
LT
1489 pci_set_master(pdev);
1490
fbbb262d
RH
1491 if (type == ADMA) {
1492 rc = nv_adma_host_init(probe_ent);
1493 if (rc)
24dc5f33 1494 return rc;
fbbb262d
RH
1495 }
1496
1da177e4
LT
1497 rc = ata_device_add(probe_ent);
1498 if (rc != NV_PORTS)
24dc5f33 1499 return -ENODEV;
1da177e4 1500
24dc5f33 1501 devm_kfree(&pdev->dev, probe_ent);
1da177e4 1502 return 0;
1da177e4
LT
1503}
1504
cdf56bcf
RH
/*
 * PCI ->remove callback.
 *
 * NOTE: hpriv is allocated with devm_kzalloc() in nv_init_one(), so
 * devres releases it automatically when ata_pci_remove_one() detaches
 * the device.  The previous explicit kfree(hpriv) here was a double
 * free of managed memory and has been removed.
 */
static void nv_remove_one (struct pci_dev *pdev)
{
	ata_pci_remove_one(pdev);
}
1513
1514static int nv_pci_device_resume(struct pci_dev *pdev)
1515{
1516 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1517 struct nv_host_priv *hpriv = host->private_data;
1518
1519 ata_pci_device_do_resume(pdev);
1520
1521 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1522 if(hpriv->type >= CK804) {
1523 u8 regval;
1524
1525 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1526 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1527 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1528 }
1529 if(hpriv->type == ADMA) {
1530 u32 tmp32;
1531 struct nv_adma_port_priv *pp;
1532 /* enable/disable ADMA on the ports appropriately */
1533 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1534
1535 pp = host->ports[0]->private_data;
1536 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1537 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1538 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1539 else
1540 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
1541 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1542 pp = host->ports[1]->private_data;
1543 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1544 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
1545 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1546 else
1547 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
1548 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1549
1550 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1551 }
1552 }
1553
1554 ata_host_resume(host);
1555
1556 return 0;
1557}
1558
cca3974e 1559static void nv_ck804_host_stop(struct ata_host *host)
ada364e8 1560{
cca3974e 1561 struct pci_dev *pdev = to_pci_dev(host->dev);
ada364e8
TH
1562 u8 regval;
1563
1564 /* disable SATA space for CK804 */
1565 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1566 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1567 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
ada364e8
TH
1568}
1569
fbbb262d
RH
1570static void nv_adma_host_stop(struct ata_host *host)
1571{
1572 struct pci_dev *pdev = to_pci_dev(host->dev);
fbbb262d
RH
1573 u32 tmp32;
1574
fbbb262d
RH
1575 /* disable ADMA on the ports */
1576 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1577 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1578 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1579 NV_MCP_SATA_CFG_20_PORT1_EN |
1580 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1581
1582 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1583
1584 nv_ck804_host_stop(host);
1585}
1586
1da177e4
LT
1587static int __init nv_init(void)
1588{
b7887196 1589 return pci_register_driver(&nv_pci_driver);
1da177e4
LT
1590}
1591
1592static void __exit nv_exit(void)
1593{
1594 pci_unregister_driver(&nv_pci_driver);
1595}
1596
/* Module entry/exit hooks and the read-only (0444) "adma" parameter. */
1597module_init(nv_init);
1598module_exit(nv_exit);
fbbb262d
RH
1599module_param_named(adma, adma_enabled, bool, 0444);
1600MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
This page took 0.259321 seconds and 5 git commands to generate.