/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in much the same
 *  fashion as other PCI IDE BMDMA controllers, with a few NV-specific
 *  details such as register offsets, SATA phy location, hotplug info,
 *  etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

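	/*
	 * Worked numbers for the sizes above (illustrative, derived from the
	 * constants as defined): the per-port coherent DMA area provides,
	 * for each of the 32 tags, a 128-byte CPB plus an external
	 * scatter/gather table:
	 *
	 *   NV_ADMA_SGTBL_LEN        = (1024 - 128) / 16   = 56 entries
	 *   NV_ADMA_SGTBL_TOTAL_LEN  = 56 + 5 inline APRDs = 61 entries
	 *   NV_ADMA_SGTBL_SZ         = 56 * 16             = 896 bytes
	 *   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896)    = 32768 bytes/port
	 */
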
	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
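
/*
 * Illustrative example of the split described above (derived from this
 * struct and from nv_adma_fill_sg() below, not from vendor documentation):
 * a DMA command that maps to seven scatter/gather segments places segments
 * 0-4 in aprd[0..4] and the remaining two in the per-tag external APRD
 * table whose bus address goes into next_aprd; with five or fewer segments
 * next_aprd is simply left at 0.
 */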


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem *		ctl_block;
	void __iomem *		gen_block;
	void __iomem *		notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
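
/*
 * For reference, worked from the macro above (not from vendor docs): the
 * per-port interrupt-pending bits in the ADMA general control register sit
 * 12 bits apart, so port 0 is checked against bit 19 and port 1 against
 * bit 31.
 */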

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
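
/*
 * Summary of the mode handshake implemented by the two functions above
 * (the timing figure here is an illustration worked from the loop bounds,
 * not a documented requirement): switching modes toggles NV_ADMA_CTL_GO
 * and then polls NV_ADMA_STAT until the controller reports the matching
 * state -- LEGACY set for register mode, LEGACY clear plus IDLE set for
 * ADMA mode. Each poll loop makes at most 20 reads spaced 50ns apart,
 * roughly a microsecond, before warning and carrying on; pp->flags is
 * updated either way so the driver's bookkeeping matches the mode it
 * requested.
 */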

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while(idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
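
/*
 * Encoding example for the helper above (illustrative; the ATA_REG_*
 * values come from <linux/libata.h>): each 16-bit CPB taskfile entry packs
 * a shadow register index in the high byte, the value to write in the low
 * byte, and control bits such as WNB (wait for not-BSY) or CMDEND on top.
 * The final entry is therefore always (ATA_REG_CMD << 8) | tf->command |
 * CMDEND, and any unused slots in the 12-entry array are padded with IGN
 * so the engine skips them.
 */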

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n",cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable
					   at times, at least in ADMA mode. Force
					   it on always when a command is active,
					   to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if(ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos) );
					check_commands &= ~(1 << pos );
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
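
/*
 * Resulting APRD flag pattern, for illustration (derived from the two
 * helpers above, not from a spec): writes get NV_APRD_WRITE, the final
 * segment of a command gets NV_APRD_END, and every other segment gets
 * NV_APRD_CONT except entry 4 -- the last inline APRD -- which instead
 * continues into the external table through the CPB's next_aprd pointer.
 */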

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
	   (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
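
/*
 * Worked example of the APPEND write above (an illustration of the format
 * the comment describes, not additional documented behavior): the count
 * field holds "number of CPBs to append minus one", so appending the
 * single CPB for tag 5 is writew(5 | (0 << 8), mmio + NV_ADMA_APPEND),
 * which is why writing the bare tag value suffices here.
 */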

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No active request? Clear the interrupt
				// status anyway, in case an interrupt is pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
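
/*
 * Example of the shift arithmetic used by the freeze/thaw pairs above,
 * worked from NV_INT_PORT_SHIFT = 4: port 0's status/enable bits occupy
 * bits 0-3 and port 1's bits 4-7, so thawing port 1 acks with
 * NV_INT_ALL << 4 (0xf0) and unmasks NV_INT_MASK << 4 (0xd0).
 */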

static int nv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	const struct ata_port_info *ppi[2];
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = ppi[1] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if(rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");