/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

enum {
        NV_MMIO_BAR = 5,

        NV_PORTS = 2,
        NV_PIO_MASK = 0x1f,
        NV_MWDMA_MASK = 0x07,
        NV_UDMA_MASK = 0x7f,
        NV_PORT0_SCR_REG_OFFSET = 0x00,
        NV_PORT1_SCR_REG_OFFSET = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS = 0x10,
        NV_INT_ENABLE = 0x11,
        NV_INT_STATUS_CK804 = 0x440,
        NV_INT_ENABLE_CK804 = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV = 0x01,
        NV_INT_PM = 0x02,
        NV_INT_ADDED = 0x04,
        NV_INT_REMOVED = 0x08,

        NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */

        NV_INT_ALL = 0x0f,
        NV_INT_MASK = NV_INT_DEV |
                      NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG = 0x12,
        NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20 = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS = 32,
        NV_ADMA_CPB_SZ = 128,
        NV_ADMA_APRD_SZ = 16,
        NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
                            NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
                                   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN = 0x400,
        NV_ADMA_GEN_CTL = 0x00,
        NV_ADMA_NOTIFIER_CLEAR = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL = 0x40,
        NV_ADMA_CPB_COUNT = 0x42,
        NV_ADMA_NEXT_CPB_IDX = 0x43,
        NV_ADMA_STAT = 0x44,
        NV_ADMA_CPB_BASE_LOW = 0x48,
        NV_ADMA_CPB_BASE_HIGH = 0x4C,
        NV_ADMA_APPEND = 0x50,
        NV_ADMA_NOTIFIER = 0x68,
        NV_ADMA_NOTIFIER_ERROR = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
        NV_ADMA_CTL_GO = (1 << 7),
        NV_ADMA_CTL_AIEN = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE = (1 << 0),
        NV_CPB_RESP_ATA_ERR = (1 << 3),
        NV_CPB_RESP_CMD_ERR = (1 << 4),
        NV_CPB_RESP_CPB_ERR = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID = (1 << 0),
        NV_CPB_CTL_QUEUE = (1 << 1),
        NV_CPB_CTL_APRD_VALID = (1 << 2),
        NV_CPB_CTL_IEN = (1 << 3),
        NV_CPB_CTL_FPDMA = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE = (1 << 1),
        NV_APRD_END = (1 << 2),
        NV_APRD_CONT = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
        NV_ADMA_STAT_HOTPLUG = (1 << 2),
        NV_ADMA_STAT_CPBERR = (1 << 4),
        NV_ADMA_STAT_SERROR = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
        NV_ADMA_STAT_IDLE = (1 << 8),
        NV_ADMA_STAT_LEGACY = (1 << 9),
        NV_ADMA_STAT_STOPPED = (1 << 10),
        NV_ADMA_STAT_DONE = (1 << 12),
        NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
                           NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64 addr;
        __le32 len;
        u8     flags;
        u8     packet_len;
        __le16 reserved;
};

enum nv_adma_regbits {
        CMDEND = (1 << 15),       /* end of command list */
        WNB    = (1 << 14),       /* wait-not-BSY */
        IGN    = (1 << 13),       /* ignore this entry */
        CS1n   = (1 << (4 + 8)),  /* std. PATA signals follow... */
        DA2    = (1 << (2 + 8)),
        DA1    = (1 << (1 + 8)),
        DA0    = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8     resp_flags;    /* 0 */
        u8     reserved1;     /* 1 */
        u8     ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8     len;           /* 3 */
        u8     tag;           /* 4 */
        u8     next_cpb_idx;  /* 5 */
        __le16 reserved2;     /* 6-7 */
        __le16 tf[12];        /* 8-31 */
        struct nv_adma_prd aprd[5]; /* 32-111 */
        __le64 next_aprd;     /* 112-119 */
        __le64 reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb *cpb;
        dma_addr_t cpb_dma;
        struct nv_adma_prd *aprd;
        dma_addr_t aprd_dma;
        void __iomem *ctl_block;
        void __iomem *gen_block;
        void __iomem *notifier_clear_block;
        u8 flags;
        int last_issue_ncq;
};

struct nv_host_priv {
        unsigned long type;
};

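/*
 * The ADMA general control/status word packs a per-port interrupt
 * indication: bit 19 for port 0 and, with a 12-bit stride, bit 31 for
 * port 1, as the macro below encodes. The interrupt handler uses it to
 * skip ports that have nothing pending.
 */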
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name = DRV_NAME,
        .id_table = nv_pci_tbl,
        .probe = nv_init_one,
#ifdef CONFIG_PM
        .suspend = ata_pci_device_suspend,
        .resume = nv_pci_device_resume,
#endif
        .remove = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = LIBATA_MAX_PRD,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = ATA_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .change_queue_depth = ata_scsi_change_queue_depth,
        .can_queue = NV_ADMA_MAX_CPBS,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = NV_ADMA_DMA_BOUNDARY,
        .slave_configure = nv_adma_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_nf2_freeze,
        .thaw = nv_nf2_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_ck804_freeze,
        .thaw = nv_ck804_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
        .host_stop = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = nv_adma_tf_read,
        .check_atapi_dma = nv_adma_check_atapi_dma,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = nv_adma_qc_prep,
        .qc_issue = nv_adma_qc_issue,
        .freeze = nv_adma_freeze,
        .thaw = nv_adma_thaw,
        .error_handler = nv_adma_error_handler,
        .post_internal_cmd = nv_adma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_clear = nv_adma_irq_clear,
        .irq_on = ata_irq_on,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = nv_adma_port_start,
        .port_stop = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend = nv_adma_port_suspend,
        .port_resume = nv_adma_port_resume,
#endif
        .host_stop = nv_adma_host_stop,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_generic_ops,
                .irq_handler = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_nf2_ops,
                .irq_handler = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht = &nv_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_ck804_ops,
                .irq_handler = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht = &nv_adma_sht,
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_adma_ops,
                .irq_handler = nv_adma_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

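/*
 * Take a port out of ADMA mode and back to legacy register mode: wait
 * (bounded to 20 polls of ~50ns each) for the engine to go idle, clear
 * the GO bit, then wait for the LEGACY status bit. Timeouts are logged
 * but otherwise ignored.
 */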
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                        status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

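/*
 * Counterpart of nv_adma_register_mode(): set the GO bit and wait for
 * LEGACY to clear and IDLE to assert. Must not be called while the port
 * is restricted to the legacy interface for ATAPI (hence the WARN_ON).
 */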
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
               !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Since commands where a result TF is requested are not
           executed in ADMA mode, the only time this function will be called
           in ADMA mode will be if a command fails. In this case we
           don't care about going into register mode with ADMA commands
           pending, as the commands will all shortly be aborted anyway. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

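/*
 * Encode a taskfile into the CPB's 16-bit entry format: each entry has
 * a shadow register address in the high byte and the value in the low
 * byte, optionally OR'd with control bits - WNB (wait-not-BSY) on the
 * first entry, CMDEND on the command register write. Unused slots are
 * padded with IGN entries.
 */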
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

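/*
 * Inspect the response flags of one CPB. Any error flag (or a forced
 * error from the error notifier) kicks the port into EH via abort or
 * freeze; a DONE flag completes the matching queued command. Returns
 * nonzero if EH was invoked.
 */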
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
                                cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

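/*
 * Main ADMA interrupt handler. Per port: fall back to the legacy
 * handler when ADMA is disabled or the port is in register mode, then
 * read and clear the notifier and ADMA status registers, freeze on
 * hotplug/timeout/SError conditions, and finally walk the outstanding
 * tags' CPBs to complete finished commands.
 */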
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands;
                                int pos, error = 0;

                                if (ata_tag_valid(ap->link.active_tag))
                                        check_commands = 1 << ap->link.active_tag;
                                else
                                        check_commands = ap->link.sactive;

                                /* Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
               ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
               mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
               mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
               ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

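/*
 * Allocate per-port ADMA state. The coherent DMA area holds the 32
 * CPBs (one per tag) followed by the external APRD tables, one table
 * of NV_ADMA_SGTBL_SZ bytes per tag. The port starts out in register
 * mode, with the channel reset and interrupts enabled.
 */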
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
                NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
               NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
               NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL); /* flush posted write */

        return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr = mmio;
        ioport->data_addr = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr =
        ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr =
        ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr =
        ioport->ctl_addr = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}

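/*
 * Scatter/gather handling: the first five APRD entries live inside the
 * CPB itself; the fifth inline entry (idx 4) is written without the
 * CONT flag, and any further segments continue in the per-tag external
 * table that next_aprd points to.
 */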
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
        aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        unsigned int idx;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        VPRINTK("ENTER\n");

        idx = 0;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] :
                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);
                idx++;
        }
        if (idx > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma +
                        NV_ADMA_SGTBL_SZ * qc->tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        /* ADMA engine can only be used for non-ATAPI DMA commands,
           or interrupt-driven no-data commands, where a result taskfile
           is not required. */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (qc->tf.flags & ATA_TFLAG_POLLING) ||
            (qc->flags & ATA_QCFLAG_RESULT_TF))
                return 1;

        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
            (qc->tf.protocol == ATA_PROT_NODATA))
                return 0;

        return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_IEN;

        if (nv_adma_use_reg_mode(qc)) {
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        cpb->resp_flags = NV_CPB_RESP_DONE;
        wmb();
        cpb->ctl_flags = 0;
        wmb();

        cpb->len = 3;
        cpb->tag = qc->tag;
        cpb->next_cpb_idx = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        } else
                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
           finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
        wmb();
        cpb->resp_flags = 0;
}

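/*
 * Issue a prepared command. Register-mode commands take the normal
 * libata issue path; ADMA commands are kicked off by writing the tag
 * to the APPEND register. The udelay when alternating between NCQ and
 * non-NCQ commands is an empirical workaround for command timeouts
 * seen on this hardware.
 */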
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

        VPRINTK("ENTER\n");

        if (nv_adma_use_reg_mode(qc)) {
                /* use ATA register mode */
                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
        } else
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        wmb();

        if (curr_ncq != pp->last_issue_ncq) {
                /* Seems to need some delay before switching between NCQ and non-NCQ
                   commands, else we get command timeouts and such. */
                udelay(20);
                pp->last_issue_ncq = curr_ncq;
        }

        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                if (ap &&
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                        else
                                // No request pending? Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);
                }

        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}

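/*
 * Shared nf2/ck804 interrupt body: the controller packs each port's
 * status into successive NV_INT_PORT_SHIFT-bit fields of irq_stat, so
 * the status byte is shifted down as the ports are walked.
 */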
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
        int i, handled = 0;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;
        }

        return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        unsigned int dummy;

        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers.  Don't classify on hardreset.  For more
         * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                        u32 status = readw(mmio + NV_ADMA_STAT);
                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

                        ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
                                "next cpb count 0x%X next cpb idx 0x%x\n",
                                notifier, notifier_error, gen_ctl, status,
                                cpb_count, next_cpb_idx);

                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                                struct nv_adma_cpb *cpb = &pp->cpb[i];
                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
                                    ap->link.sactive & (1 << i))
                                        ata_port_printk(ap, KERN_ERR,
                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                                i, cpb->ctl_flags, cpb->resp_flags);
                        }
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                /* Mark all of the CPBs as invalid to prevent them from being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL); /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL); /* flush posted write */
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        const struct ata_port_info *ppi[] = { NULL, NULL };
        struct ata_host *host;
        struct nv_host_priv *hpriv;
        int rc;
        u32 bar;
        void __iomem *base;
        unsigned long type = ent->driver_data;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar = 0; bar < 6; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* determine type and allocate host */
        if (type >= CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                type = ADMA;
        }

        ppi[0] = &nv_port_info[type];
        rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
        if (rc)
                return rc;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
        hpriv->type = type;
        host->private_data = hpriv;

        /* set 64bit dma masks, may fail */
        if (type == ADMA) {
                if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
                        pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
        }

        /* request and iomap NV_MMIO_BAR */
        rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
        if (rc)
                return rc;

        /* configure SCR access */
        base = host->iomap[NV_MMIO_BAR];
        host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                u8 regval;

                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
        }

        /* init ADMA */
        if (type == ADMA) {
                rc = nv_adma_host_init(host);
                if (rc)
                        return rc;
        }

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
                                 IRQF_SHARED, ppi[0]->sht);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if (hpriv->type >= CK804) {
                        u8 regval;

                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
                }
                if (hpriv->type == ADMA) {
                        u32 tmp32;
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
                                          NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                        else
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
                                          NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
                }
        }

        ata_host_resume(host);

        return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u32 tmp32;

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
        return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
        pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
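/*
 * ADMA may be disabled at module load time, e.g. (hypothetical
 * invocation, assuming the driver is built as the sata_nv module):
 *   modprobe sata_nv adma=0
 */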
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");