sata_nv: use ADMA for NODATA commands
drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};

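/*
 * Sizing sketch derived from the constants above: each command tag owns
 * a 1K slice of the port's coherent DMA area, a 128-byte CPB plus
 * (1024 - 128) / 16 = 56 external APRDs.  Together with the 5 APRDs
 * embedded in the CPB itself that yields NV_ADMA_SGTBL_TOTAL_LEN = 61
 * SG entries per command, and 32 tags * 1K = 32K per port
 * (NV_ADMA_PORT_PRIV_DMA_SZ).
 */
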
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))

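/*
 * NV_ADMA_CHECK_INTR picks the per-port interrupt bit out of the ADMA
 * general control/status word; the bits sit 12 apart, so this is bit 19
 * for port 0 and bit 31 for port 1.
 */
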
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

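/*
 * The two helpers above only toggle NV_ADMA_CTL_GO: with GO clear the
 * channel appears to software as a conventional BMDMA/taskfile port
 * ("register mode"); with GO set the engine fetches and executes CPBs.
 * pp->flags mirrors the hardware state so mode switches stay cheap.
 */
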
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}

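/*
 * Each tf[] entry above encodes "(shadow register address << 8) | data"
 * with control bits (WNB, IGN, CMDEND) or'd into the high bits.  The
 * routine always emits 12 entries, padding with IGN for non-LBA48
 * commands: 24 bytes, i.e. the 3 64-bit words that nv_adma_qc_prep()
 * stores in cpb->len.
 */
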
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}

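/*
 * Error policy in nv_adma_check_cpb(): a CPB-level or unexplained
 * notifier error freezes the whole port, since controller state is
 * suspect, while a plain ATA/CMD error only aborts and lets EH work out
 * which commands actually failed.
 */
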
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				/* Check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag)) {
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag,
						notifier_error & (1 << ap->active_tag));
				} else {
					int pos, error = 0;
					u32 active = ap->sactive;

					while ((pos = ffs(active)) && !error) {
						pos--;
						error = nv_adma_check_cpb(ap, pos,
							notifier_error & (1 << pos) );
						active &= ~(1 << pos );
					}
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}

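/*
 * Note the "(mem_dma >> 16) >> 16" above (and in nv_adma_port_resume()):
 * splitting the shift keeps the expression well-defined even when
 * dma_addr_t is only 32 bits wide, where a single ">> 32" would be
 * undefined behavior.
 */
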
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}

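/*
 * APRD chaining, as built by the routines around this comment: every
 * entry except the last carries NV_APRD_CONT, except inline entry 4;
 * after it the engine follows cpb->next_aprd into the external table
 * instead, so CONT must stay clear there.  The final entry of the whole
 * list is marked NV_APRD_END.
 */
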
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

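/*
 * Decision summary (the point of this change): DMA-mapped commands and
 * interrupt-driven NODATA commands now run through the ADMA engine;
 * ATAPI-configured ports and polled commands still fall back to
 * register mode.
 */
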
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}

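/*
 * qc->tag fits in the low byte, so the writew() above leaves the
 * "count - 1" high byte at zero: exactly one CPB is appended per issue.
 */
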
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_device_do_resume(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
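
/* Usage note: ADMA is on by default; it can be disabled at load time
   with "modprobe sata_nv adma=0" (0444, so read-only at runtime). */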