1 /*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion to other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.5"
53
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
55
56 enum {
57 NV_MMIO_BAR = 5,
58
59 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
65
66 /* INT_STATUS/ENABLE */
67 NV_INT_STATUS = 0x10,
68 NV_INT_ENABLE = 0x11,
69 NV_INT_STATUS_CK804 = 0x440,
70 NV_INT_ENABLE_CK804 = 0x441,
71
72 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
80 NV_INT_ALL = 0x0f,
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
83
84 /* INT_CONFIG */
85 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
87
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
171
172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
177
178 /* MCP55 */
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
182
183 /* SWNCQ ENABLE BITS */
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
186
187 /* SW NCQ status bits */
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
192
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
197
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
200
201 };
202
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 __le64 addr;
206 __le32 len;
207 u8 flags;
208 u8 packet_len;
209 __le16 reserved;
210 };
211
212 enum nv_adma_regbits {
213 CMDEND = (1 << 15), /* end of command list */
214 WNB = (1 << 14), /* wait-not-BSY */
215 IGN = (1 << 13), /* ignore this entry */
216 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
217 DA2 = (1 << (2 + 8)),
218 DA1 = (1 << (1 + 8)),
219 DA0 = (1 << (0 + 8)),
220 };
221
222 /* ADMA Command Parameter Block
223 The first 5 SG segments are stored inside the Command Parameter Block itself.
224 If there are more than 5 segments, the remainder is stored in a separate
225 memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 u8 resp_flags; /* 0 */
228 u8 reserved1; /* 1 */
229 u8 ctl_flags; /* 2 */
230 /* len is length of taskfile in 64 bit words */
231 u8 len; /* 3 */
232 u8 tag; /* 4 */
233 u8 next_cpb_idx; /* 5 */
234 __le16 reserved2; /* 6-7 */
235 __le16 tf[12]; /* 8-31 */
236 struct nv_adma_prd aprd[5]; /* 32-111 */
237 __le64 next_aprd; /* 112-119 */
238 __le64 reserved3; /* 120-127 */
239 };
240
241
242 struct nv_adma_port_priv {
243 struct nv_adma_cpb *cpb;
244 dma_addr_t cpb_dma;
245 struct nv_adma_prd *aprd;
246 dma_addr_t aprd_dma;
247 void __iomem *ctl_block;
248 void __iomem *gen_block;
249 void __iomem *notifier_clear_block;
250 u64 adma_dma_mask;
251 u8 flags;
252 int last_issue_ncq;
253 };
254
255 struct nv_host_priv {
256 unsigned long type;
257 };
258
259 struct defer_queue {
260 u32 defer_bits;
261 unsigned int head;
262 unsigned int tail;
263 unsigned int tag[ATA_MAX_QUEUE];
264 };
265
266 enum ncq_saw_flag_list {
267 ncq_saw_d2h = (1U << 0),
268 ncq_saw_dmas = (1U << 1),
269 ncq_saw_sdb = (1U << 2),
270 ncq_saw_backout = (1U << 3),
271 };
272
273 struct nv_swncq_port_priv {
274 struct ata_prd *prd; /* our SG list */
275 dma_addr_t prd_dma; /* and its DMA mapping */
276 void __iomem *sactive_block;
277 void __iomem *irq_block;
278 void __iomem *tag_block;
279 u32 qc_active;
280
281 unsigned int last_issue_tag;
282
283 /* FIFO circular queue to store deferred commands */
284 struct defer_queue defer_queue;
285
286 /* for NCQ interrupt analysis */
287 u32 dhfis_bits;
288 u32 dmafis_bits;
289 u32 sdbfis_bits;
290
291 unsigned int ncq_flags;
292 };
293
294
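/* Per-port ADMA interrupt check in the GEN_CTL register: bit 19 for port 0, bit 31 for port 1 */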
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
307
308 static void nv_nf2_freeze(struct ata_port *ap);
309 static void nv_nf2_thaw(struct ata_port *ap);
310 static void nv_ck804_freeze(struct ata_port *ap);
311 static void nv_ck804_thaw(struct ata_port *ap);
312 static int nv_adma_slave_config(struct scsi_device *sdev);
313 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
314 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
315 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
316 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
317 static void nv_adma_irq_clear(struct ata_port *ap);
318 static int nv_adma_port_start(struct ata_port *ap);
319 static void nv_adma_port_stop(struct ata_port *ap);
320 #ifdef CONFIG_PM
321 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
322 static int nv_adma_port_resume(struct ata_port *ap);
323 #endif
324 static void nv_adma_freeze(struct ata_port *ap);
325 static void nv_adma_thaw(struct ata_port *ap);
326 static void nv_adma_error_handler(struct ata_port *ap);
327 static void nv_adma_host_stop(struct ata_host *host);
328 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
329 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
330
331 static void nv_mcp55_thaw(struct ata_port *ap);
332 static void nv_mcp55_freeze(struct ata_port *ap);
333 static void nv_swncq_error_handler(struct ata_port *ap);
334 static int nv_swncq_slave_config(struct scsi_device *sdev);
335 static int nv_swncq_port_start(struct ata_port *ap);
336 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
337 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
338 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
339 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
340 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
341 #ifdef CONFIG_PM
342 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
343 static int nv_swncq_port_resume(struct ata_port *ap);
344 #endif
345
346 enum nv_host_type
347 {
348 GENERIC,
349 NFORCE2,
350 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
351 CK804,
352 ADMA,
353 SWNCQ,
354 };
355
356 static const struct pci_device_id nv_pci_tbl[] = {
357 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
358 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
359 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
360 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
361 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
362 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
363 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
364 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
365 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
366 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
367 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
368 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
369 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
370 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
371
372 { } /* terminate list */
373 };
374
375 static struct pci_driver nv_pci_driver = {
376 .name = DRV_NAME,
377 .id_table = nv_pci_tbl,
378 .probe = nv_init_one,
379 #ifdef CONFIG_PM
380 .suspend = ata_pci_device_suspend,
381 .resume = nv_pci_device_resume,
382 #endif
383 .remove = ata_pci_remove_one,
384 };
385
386 static struct scsi_host_template nv_sht = {
387 ATA_BMDMA_SHT(DRV_NAME),
388 };
389
390 static struct scsi_host_template nv_adma_sht = {
391 ATA_NCQ_SHT(DRV_NAME),
392 .can_queue = NV_ADMA_MAX_CPBS,
393 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
394 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
395 .slave_configure = nv_adma_slave_config,
396 };
397
398 static struct scsi_host_template nv_swncq_sht = {
399 ATA_NCQ_SHT(DRV_NAME),
400 .can_queue = ATA_MAX_QUEUE,
401 .sg_tablesize = LIBATA_MAX_PRD,
402 .dma_boundary = ATA_DMA_BOUNDARY,
403 .slave_configure = nv_swncq_slave_config,
404 };
405
406 static struct ata_port_operations nv_generic_ops = {
407 .inherits = &ata_bmdma_port_ops,
408 .hardreset = ATA_OP_NULL,
409 .scr_read = nv_scr_read,
410 .scr_write = nv_scr_write,
411 };
412
413 static struct ata_port_operations nv_nf2_ops = {
414 .inherits = &nv_generic_ops,
415 .freeze = nv_nf2_freeze,
416 .thaw = nv_nf2_thaw,
417 };
418
419 static struct ata_port_operations nv_ck804_ops = {
420 .inherits = &nv_generic_ops,
421 .freeze = nv_ck804_freeze,
422 .thaw = nv_ck804_thaw,
423 .host_stop = nv_ck804_host_stop,
424 };
425
426 static struct ata_port_operations nv_adma_ops = {
427 .inherits = &nv_generic_ops,
428
429 .check_atapi_dma = nv_adma_check_atapi_dma,
430 .sff_tf_read = nv_adma_tf_read,
431 .qc_defer = ata_std_qc_defer,
432 .qc_prep = nv_adma_qc_prep,
433 .qc_issue = nv_adma_qc_issue,
434 .sff_irq_clear = nv_adma_irq_clear,
435
436 .freeze = nv_adma_freeze,
437 .thaw = nv_adma_thaw,
438 .error_handler = nv_adma_error_handler,
439 .post_internal_cmd = nv_adma_post_internal_cmd,
440
441 .port_start = nv_adma_port_start,
442 .port_stop = nv_adma_port_stop,
443 #ifdef CONFIG_PM
444 .port_suspend = nv_adma_port_suspend,
445 .port_resume = nv_adma_port_resume,
446 #endif
447 .host_stop = nv_adma_host_stop,
448 };
449
450 static struct ata_port_operations nv_swncq_ops = {
451 .inherits = &nv_generic_ops,
452
453 .qc_defer = ata_std_qc_defer,
454 .qc_prep = nv_swncq_qc_prep,
455 .qc_issue = nv_swncq_qc_issue,
456
457 .freeze = nv_mcp55_freeze,
458 .thaw = nv_mcp55_thaw,
459 .error_handler = nv_swncq_error_handler,
460
461 #ifdef CONFIG_PM
462 .port_suspend = nv_swncq_port_suspend,
463 .port_resume = nv_swncq_port_resume,
464 #endif
465 .port_start = nv_swncq_port_start,
466 };
467
468 struct nv_pi_priv {
469 irq_handler_t irq_handler;
470 struct scsi_host_template *sht;
471 };
472
473 #define NV_PI_PRIV(_irq_handler, _sht) \
474 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
475
476 static const struct ata_port_info nv_port_info[] = {
477 /* generic */
478 {
479 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
480 .pio_mask = NV_PIO_MASK,
481 .mwdma_mask = NV_MWDMA_MASK,
482 .udma_mask = NV_UDMA_MASK,
483 .port_ops = &nv_generic_ops,
484 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
485 },
486 /* nforce2/3 */
487 {
488 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
489 .pio_mask = NV_PIO_MASK,
490 .mwdma_mask = NV_MWDMA_MASK,
491 .udma_mask = NV_UDMA_MASK,
492 .port_ops = &nv_nf2_ops,
493 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
494 },
495 /* ck804 */
496 {
497 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
498 .pio_mask = NV_PIO_MASK,
499 .mwdma_mask = NV_MWDMA_MASK,
500 .udma_mask = NV_UDMA_MASK,
501 .port_ops = &nv_ck804_ops,
502 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
503 },
504 /* ADMA */
505 {
506 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
507 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
508 .pio_mask = NV_PIO_MASK,
509 .mwdma_mask = NV_MWDMA_MASK,
510 .udma_mask = NV_UDMA_MASK,
511 .port_ops = &nv_adma_ops,
512 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
513 },
514 /* SWNCQ */
515 {
516 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
517 ATA_FLAG_NCQ,
518 .pio_mask = NV_PIO_MASK,
519 .mwdma_mask = NV_MWDMA_MASK,
520 .udma_mask = NV_UDMA_MASK,
521 .port_ops = &nv_swncq_ops,
522 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
523 },
524 };
525
526 MODULE_AUTHOR("NVIDIA");
527 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
528 MODULE_LICENSE("GPL");
529 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
530 MODULE_VERSION(DRV_VERSION);
531
532 static int adma_enabled;
533 static int swncq_enabled = 1;
534
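/* Switch the port back to legacy register mode: wait for the ADMA engine to go
   idle, clear the GO bit, then wait for the LEGACY status bit before flagging
   the port as being in register mode. */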
535 static void nv_adma_register_mode(struct ata_port *ap)
536 {
537 struct nv_adma_port_priv *pp = ap->private_data;
538 void __iomem *mmio = pp->ctl_block;
539 u16 tmp, status;
540 int count = 0;
541
542 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
543 return;
544
545 status = readw(mmio + NV_ADMA_STAT);
546 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
547 ndelay(50);
548 status = readw(mmio + NV_ADMA_STAT);
549 count++;
550 }
551 if (count == 20)
552 ata_port_printk(ap, KERN_WARNING,
553 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
554 status);
555
556 tmp = readw(mmio + NV_ADMA_CTL);
557 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
558
559 count = 0;
560 status = readw(mmio + NV_ADMA_STAT);
561 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
562 ndelay(50);
563 status = readw(mmio + NV_ADMA_STAT);
564 count++;
565 }
566 if (count == 20)
567 ata_port_printk(ap, KERN_WARNING,
568 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
569 status);
570
571 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
572 }
573
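/* Switch the port from register mode into ADMA mode: set the GO bit and wait
   for the LEGACY bit to clear and the engine to report idle. */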
574 static void nv_adma_mode(struct ata_port *ap)
575 {
576 struct nv_adma_port_priv *pp = ap->private_data;
577 void __iomem *mmio = pp->ctl_block;
578 u16 tmp, status;
579 int count = 0;
580
581 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
582 return;
583
584 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
585
586 tmp = readw(mmio + NV_ADMA_CTL);
587 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
588
589 status = readw(mmio + NV_ADMA_STAT);
590 while (((status & NV_ADMA_STAT_LEGACY) ||
591 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
592 ndelay(50);
593 status = readw(mmio + NV_ADMA_STAT);
594 count++;
595 }
596 if (count == 20)
597 ata_port_printk(ap, KERN_WARNING,
598 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
599 status);
600
601 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
602 }
603
604 static int nv_adma_slave_config(struct scsi_device *sdev)
605 {
606 struct ata_port *ap = ata_shost_to_port(sdev->host);
607 struct nv_adma_port_priv *pp = ap->private_data;
608 struct nv_adma_port_priv *port0, *port1;
609 struct scsi_device *sdev0, *sdev1;
610 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
611 unsigned long segment_boundary, flags;
612 unsigned short sg_tablesize;
613 int rc;
614 int adma_enable;
615 u32 current_reg, new_reg, config_mask;
616
617 rc = ata_scsi_slave_config(sdev);
618
619 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
620 /* Not a proper libata device, ignore */
621 return rc;
622
623 spin_lock_irqsave(ap->lock, flags);
624
625 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
626 /*
627 * NVIDIA reports that ADMA mode does not support ATAPI commands.
628 * Therefore ATAPI commands are sent through the legacy interface.
629 * However, the legacy interface only supports 32-bit DMA.
630 * Restrict DMA parameters as required by the legacy interface
631 * when an ATAPI device is connected.
632 */
633 segment_boundary = ATA_DMA_BOUNDARY;
634 /* Subtract 1 since an extra entry may be needed for padding, see
635 libata-scsi.c */
636 sg_tablesize = LIBATA_MAX_PRD - 1;
637
638 /* Since the legacy DMA engine is in use, we need to disable ADMA
639 on the port. */
640 adma_enable = 0;
641 nv_adma_register_mode(ap);
642 } else {
643 segment_boundary = NV_ADMA_DMA_BOUNDARY;
644 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
645 adma_enable = 1;
646 }
647
648 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
649
650 if (ap->port_no == 1)
651 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
652 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
653 else
654 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
655 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
656
657 if (adma_enable) {
658 new_reg = current_reg | config_mask;
659 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
660 } else {
661 new_reg = current_reg & ~config_mask;
662 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
663 }
664
665 if (current_reg != new_reg)
666 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
667
668 port0 = ap->host->ports[0]->private_data;
669 port1 = ap->host->ports[1]->private_data;
670 sdev0 = ap->host->ports[0]->link.device[0].sdev;
671 sdev1 = ap->host->ports[1]->link.device[0].sdev;
672 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
673 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
674 /** We have to set the DMA mask to 32-bit if either port is in
675 ATAPI mode, since they are on the same PCI device which is
676 used for DMA mapping. If we set the mask we also need to set
677 the bounce limit on both ports to ensure that the block
678 layer doesn't feed addresses that cause DMA mapping to
679 choke. If either SCSI device is not allocated yet, it's OK
680 since that port will discover its correct setting when it
681 does get allocated.
682 Note: Setting 32-bit mask should not fail. */
683 if (sdev0)
684 blk_queue_bounce_limit(sdev0->request_queue,
685 ATA_DMA_MASK);
686 if (sdev1)
687 blk_queue_bounce_limit(sdev1->request_queue,
688 ATA_DMA_MASK);
689
690 pci_set_dma_mask(pdev, ATA_DMA_MASK);
691 } else {
692 /** This shouldn't fail as it was set to this value before */
693 pci_set_dma_mask(pdev, pp->adma_dma_mask);
694 if (sdev0)
695 blk_queue_bounce_limit(sdev0->request_queue,
696 pp->adma_dma_mask);
697 if (sdev1)
698 blk_queue_bounce_limit(sdev1->request_queue,
699 pp->adma_dma_mask);
700 }
701
702 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
703 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
704 ata_port_printk(ap, KERN_INFO,
705 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
706 (unsigned long long)*ap->host->dev->dma_mask,
707 segment_boundary, sg_tablesize);
708
709 spin_unlock_irqrestore(ap->lock, flags);
710
711 return rc;
712 }
713
714 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
715 {
716 struct nv_adma_port_priv *pp = qc->ap->private_data;
717 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
718 }
719
720 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
721 {
722 /* Other than when internal or pass-through commands are executed,
723 the only time this function will be called in ADMA mode will be
724 if a command fails. In the failure case we don't care about going
725 into register mode with ADMA commands pending, as the commands will
726 all shortly be aborted anyway. We assume that NCQ commands are not
727 issued via passthrough, which is the only way that switching into
728 ADMA mode could abort outstanding commands. */
729 nv_adma_register_mode(ap);
730
731 ata_sff_tf_read(ap, tf);
732 }
733
734 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
735 {
736 unsigned int idx = 0;
737
738 if (tf->flags & ATA_TFLAG_ISADDR) {
739 if (tf->flags & ATA_TFLAG_LBA48) {
740 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
741 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
742 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
743 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
744 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
745 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
746 } else
747 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
748
749 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
750 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
751 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
752 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
753 }
754
755 if (tf->flags & ATA_TFLAG_DEVICE)
756 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
757
758 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
759
760 while (idx < 12)
761 cpb[idx++] = cpu_to_le16(IGN);
762
763 return idx;
764 }
765
766 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
767 {
768 struct nv_adma_port_priv *pp = ap->private_data;
769 u8 flags = pp->cpb[cpb_num].resp_flags;
770
771 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
772
773 if (unlikely((force_err ||
774 flags & (NV_CPB_RESP_ATA_ERR |
775 NV_CPB_RESP_CMD_ERR |
776 NV_CPB_RESP_CPB_ERR)))) {
777 struct ata_eh_info *ehi = &ap->link.eh_info;
778 int freeze = 0;
779
780 ata_ehi_clear_desc(ehi);
781 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
782 if (flags & NV_CPB_RESP_ATA_ERR) {
783 ata_ehi_push_desc(ehi, "ATA error");
784 ehi->err_mask |= AC_ERR_DEV;
785 } else if (flags & NV_CPB_RESP_CMD_ERR) {
786 ata_ehi_push_desc(ehi, "CMD error");
787 ehi->err_mask |= AC_ERR_DEV;
788 } else if (flags & NV_CPB_RESP_CPB_ERR) {
789 ata_ehi_push_desc(ehi, "CPB error");
790 ehi->err_mask |= AC_ERR_SYSTEM;
791 freeze = 1;
792 } else {
793 /* notifier error, but no error in CPB flags? */
794 ata_ehi_push_desc(ehi, "unknown");
795 ehi->err_mask |= AC_ERR_OTHER;
796 freeze = 1;
797 }
798 /* Kill all commands. EH will determine what actually failed. */
799 if (freeze)
800 ata_port_freeze(ap);
801 else
802 ata_port_abort(ap);
803 return 1;
804 }
805
806 if (likely(flags & NV_CPB_RESP_DONE)) {
807 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
808 VPRINTK("CPB flags done, flags=0x%x\n", flags);
809 if (likely(qc)) {
810 DPRINTK("Completing qc from tag %d\n", cpb_num);
811 ata_qc_complete(qc);
812 } else {
813 struct ata_eh_info *ehi = &ap->link.eh_info;
814 /* Notifier bits set without a command may indicate the drive
815 is misbehaving. Raise host state machine violation on this
816 condition. */
817 ata_port_printk(ap, KERN_ERR,
818 "notifier for tag %d with no cmd?\n",
819 cpb_num);
820 ehi->err_mask |= AC_ERR_HSM;
821 ehi->action |= ATA_EH_RESET;
822 ata_port_freeze(ap);
823 return 1;
824 }
825 }
826 return 0;
827 }
828
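/* Handle a legacy-mode interrupt for one port: freeze on hotplug events, ignore
   interrupts that are not ours, and pass device interrupts to the SFF helper. */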
829 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
830 {
831 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
832
833 /* freeze if hotplugged */
834 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
835 ata_port_freeze(ap);
836 return 1;
837 }
838
839 /* bail out if not our interrupt */
840 if (!(irq_stat & NV_INT_DEV))
841 return 0;
842
843 /* DEV interrupt w/ no active qc? */
844 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
845 ata_sff_check_status(ap);
846 return 1;
847 }
848
849 /* handle interrupt */
850 return ata_sff_host_intr(ap, qc);
851 }
852
853 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
854 {
855 struct ata_host *host = dev_instance;
856 int i, handled = 0;
857 u32 notifier_clears[2];
858
859 spin_lock(&host->lock);
860
861 for (i = 0; i < host->n_ports; i++) {
862 struct ata_port *ap = host->ports[i];
863 notifier_clears[i] = 0;
864
865 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
866 struct nv_adma_port_priv *pp = ap->private_data;
867 void __iomem *mmio = pp->ctl_block;
868 u16 status;
869 u32 gen_ctl;
870 u32 notifier, notifier_error;
871
872 /* if ADMA is disabled, use standard ata interrupt handler */
873 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
874 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
875 >> (NV_INT_PORT_SHIFT * i);
876 handled += nv_host_intr(ap, irq_stat);
877 continue;
878 }
879
880 /* if in ATA register mode, check for standard interrupts */
881 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
882 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
883 >> (NV_INT_PORT_SHIFT * i);
884 if (ata_tag_valid(ap->link.active_tag))
885 /* The NV_INT_DEV indication seems unreliable at times,
886 at least in ADMA mode. Always force it on while a
887 command is active, to prevent losing interrupts. */
888 irq_stat |= NV_INT_DEV;
889 handled += nv_host_intr(ap, irq_stat);
890 }
891
892 notifier = readl(mmio + NV_ADMA_NOTIFIER);
893 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
894 notifier_clears[i] = notifier | notifier_error;
895
896 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
897
898 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
899 !notifier_error)
900 /* Nothing to do */
901 continue;
902
903 status = readw(mmio + NV_ADMA_STAT);
904
905 /* Clear status. Ensure the controller sees the clearing before we start
906 looking at any of the CPB statuses, so that any CPB completions after
907 this point in the handler will raise another interrupt. */
908 writew(status, mmio + NV_ADMA_STAT);
909 readw(mmio + NV_ADMA_STAT); /* flush posted write */
910 rmb();
911
912 handled++; /* irq handled if we got here */
913
914 /* freeze if hotplugged or controller error */
915 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
916 NV_ADMA_STAT_HOTUNPLUG |
917 NV_ADMA_STAT_TIMEOUT |
918 NV_ADMA_STAT_SERROR))) {
919 struct ata_eh_info *ehi = &ap->link.eh_info;
920
921 ata_ehi_clear_desc(ehi);
922 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
923 if (status & NV_ADMA_STAT_TIMEOUT) {
924 ehi->err_mask |= AC_ERR_SYSTEM;
925 ata_ehi_push_desc(ehi, "timeout");
926 } else if (status & NV_ADMA_STAT_HOTPLUG) {
927 ata_ehi_hotplugged(ehi);
928 ata_ehi_push_desc(ehi, "hotplug");
929 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
930 ata_ehi_hotplugged(ehi);
931 ata_ehi_push_desc(ehi, "hot unplug");
932 } else if (status & NV_ADMA_STAT_SERROR) {
933 /* let libata analyze SError and figure out the cause */
934 ata_ehi_push_desc(ehi, "SError");
935 } else
936 ata_ehi_push_desc(ehi, "unknown");
937 ata_port_freeze(ap);
938 continue;
939 }
940
941 if (status & (NV_ADMA_STAT_DONE |
942 NV_ADMA_STAT_CPBERR |
943 NV_ADMA_STAT_CMD_COMPLETE)) {
944 u32 check_commands = notifier_clears[i];
945 int pos, error = 0;
946
947 if (status & NV_ADMA_STAT_CPBERR) {
948 /* Check all active commands */
949 if (ata_tag_valid(ap->link.active_tag))
950 check_commands = 1 <<
951 ap->link.active_tag;
952 else
953 check_commands = ap->
954 link.sactive;
955 }
956
957 /** Check CPBs for completed commands */
958 while ((pos = ffs(check_commands)) && !error) {
959 pos--;
960 error = nv_adma_check_cpb(ap, pos,
961 notifier_error & (1 << pos));
962 check_commands &= ~(1 << pos);
963 }
964 }
965 }
966 }
967
968 if (notifier_clears[0] || notifier_clears[1]) {
969 /* Note: Both notifier clear registers must be written
970 if either is set, even if one is zero, according to NVIDIA. */
971 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
972 writel(notifier_clears[0], pp->notifier_clear_block);
973 pp = host->ports[1]->private_data;
974 writel(notifier_clears[1], pp->notifier_clear_block);
975 }
976
977 spin_unlock(&host->lock);
978
979 return IRQ_RETVAL(handled);
980 }
981
982 static void nv_adma_freeze(struct ata_port *ap)
983 {
984 struct nv_adma_port_priv *pp = ap->private_data;
985 void __iomem *mmio = pp->ctl_block;
986 u16 tmp;
987
988 nv_ck804_freeze(ap);
989
990 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
991 return;
992
993 /* clear any outstanding CK804 notifications */
994 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
995 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
996
997 /* Disable interrupt */
998 tmp = readw(mmio + NV_ADMA_CTL);
999 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1000 mmio + NV_ADMA_CTL);
1001 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1002 }
1003
1004 static void nv_adma_thaw(struct ata_port *ap)
1005 {
1006 struct nv_adma_port_priv *pp = ap->private_data;
1007 void __iomem *mmio = pp->ctl_block;
1008 u16 tmp;
1009
1010 nv_ck804_thaw(ap);
1011
1012 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1013 return;
1014
1015 /* Enable interrupt */
1016 tmp = readw(mmio + NV_ADMA_CTL);
1017 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1018 mmio + NV_ADMA_CTL);
1019 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1020 }
1021
1022 static void nv_adma_irq_clear(struct ata_port *ap)
1023 {
1024 struct nv_adma_port_priv *pp = ap->private_data;
1025 void __iomem *mmio = pp->ctl_block;
1026 u32 notifier_clears[2];
1027
1028 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1029 ata_sff_irq_clear(ap);
1030 return;
1031 }
1032
1033 /* clear any outstanding CK804 notifications */
1034 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1035 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1036
1037 /* clear ADMA status */
1038 writew(0xffff, mmio + NV_ADMA_STAT);
1039
1040 /* clear notifiers - note both ports need to be written with
1041 something even though we are only clearing on one */
1042 if (ap->port_no == 0) {
1043 notifier_clears[0] = 0xFFFFFFFF;
1044 notifier_clears[1] = 0;
1045 } else {
1046 notifier_clears[0] = 0;
1047 notifier_clears[1] = 0xFFFFFFFF;
1048 }
1049 pp = ap->host->ports[0]->private_data;
1050 writel(notifier_clears[0], pp->notifier_clear_block);
1051 pp = ap->host->ports[1]->private_data;
1052 writel(notifier_clears[1], pp->notifier_clear_block);
1053 }
1054
1055 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1056 {
1057 struct nv_adma_port_priv *pp = qc->ap->private_data;
1058
1059 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1060 ata_sff_post_internal_cmd(qc);
1061 }
1062
1063 static int nv_adma_port_start(struct ata_port *ap)
1064 {
1065 struct device *dev = ap->host->dev;
1066 struct nv_adma_port_priv *pp;
1067 int rc;
1068 void *mem;
1069 dma_addr_t mem_dma;
1070 void __iomem *mmio;
1071 struct pci_dev *pdev = to_pci_dev(dev);
1072 u16 tmp;
1073
1074 VPRINTK("ENTER\n");
1075
1076 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1077 pad buffers */
1078 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1079 if (rc)
1080 return rc;
1081 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1082 if (rc)
1083 return rc;
1084
1085 rc = ata_port_start(ap);
1086 if (rc)
1087 return rc;
1088
1089 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1090 if (!pp)
1091 return -ENOMEM;
1092
1093 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1094 ap->port_no * NV_ADMA_PORT_SIZE;
1095 pp->ctl_block = mmio;
1096 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1097 pp->notifier_clear_block = pp->gen_block +
1098 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1099
1100 /* Now that the legacy PRD and padding buffer are allocated we can
1101 safely raise the DMA mask to allocate the CPB/APRD table.
1102 These calls are allowed to fail since we store the resulting
1103 mask and use it as the bounce limit in slave_config later if
1104 needed. */
1105 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1106 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1107 pp->adma_dma_mask = *dev->dma_mask;
1108
1109 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1110 &mem_dma, GFP_KERNEL);
1111 if (!mem)
1112 return -ENOMEM;
1113 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1114
1115 /*
1116 * First item in chunk of DMA memory:
1117 * 128-byte command parameter block (CPB)
1118 * one for each command tag
1119 */
1120 pp->cpb = mem;
1121 pp->cpb_dma = mem_dma;
1122
1123 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1124 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1125
1126 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1127 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1128
1129 /*
1130 * Second item: block of ADMA_SGTBL_LEN s/g entries
1131 */
1132 pp->aprd = mem;
1133 pp->aprd_dma = mem_dma;
1134
1135 ap->private_data = pp;
1136
1137 /* clear any outstanding interrupt conditions */
1138 writew(0xffff, mmio + NV_ADMA_STAT);
1139
1140 /* initialize port variables */
1141 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1142
1143 /* clear CPB fetch count */
1144 writew(0, mmio + NV_ADMA_CPB_COUNT);
1145
1146 /* clear GO for register mode, enable interrupt */
1147 tmp = readw(mmio + NV_ADMA_CTL);
1148 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1149 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1150
1151 tmp = readw(mmio + NV_ADMA_CTL);
1152 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1153 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1154 udelay(1);
1155 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1156 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1157
1158 return 0;
1159 }
1160
1161 static void nv_adma_port_stop(struct ata_port *ap)
1162 {
1163 struct nv_adma_port_priv *pp = ap->private_data;
1164 void __iomem *mmio = pp->ctl_block;
1165
1166 VPRINTK("ENTER\n");
1167 writew(0, mmio + NV_ADMA_CTL);
1168 }
1169
1170 #ifdef CONFIG_PM
1171 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1172 {
1173 struct nv_adma_port_priv *pp = ap->private_data;
1174 void __iomem *mmio = pp->ctl_block;
1175
1176 /* Go to register mode - clears GO */
1177 nv_adma_register_mode(ap);
1178
1179 /* clear CPB fetch count */
1180 writew(0, mmio + NV_ADMA_CPB_COUNT);
1181
1182 /* disable interrupt, shut down port */
1183 writew(0, mmio + NV_ADMA_CTL);
1184
1185 return 0;
1186 }
1187
1188 static int nv_adma_port_resume(struct ata_port *ap)
1189 {
1190 struct nv_adma_port_priv *pp = ap->private_data;
1191 void __iomem *mmio = pp->ctl_block;
1192 u16 tmp;
1193
1194 /* set CPB block location */
1195 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1196 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1197
1198 /* clear any outstanding interrupt conditions */
1199 writew(0xffff, mmio + NV_ADMA_STAT);
1200
1201 /* initialize port variables */
1202 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1203
1204 /* clear CPB fetch count */
1205 writew(0, mmio + NV_ADMA_CPB_COUNT);
1206
1207 /* clear GO for register mode, enable interrupt */
1208 tmp = readw(mmio + NV_ADMA_CTL);
1209 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1210 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1211
1212 tmp = readw(mmio + NV_ADMA_CTL);
1213 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1214 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1215 udelay(1);
1216 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1217 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1218
1219 return 0;
1220 }
1221 #endif
1222
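/* Map the SFF taskfile register addresses onto the port's ADMA MMIO block;
   the registers are laid out 4 bytes apart. */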
1223 static void nv_adma_setup_port(struct ata_port *ap)
1224 {
1225 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1226 struct ata_ioports *ioport = &ap->ioaddr;
1227
1228 VPRINTK("ENTER\n");
1229
1230 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1231
1232 ioport->cmd_addr = mmio;
1233 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1234 ioport->error_addr =
1235 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1236 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1237 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1238 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1239 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1240 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1241 ioport->status_addr =
1242 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1243 ioport->altstatus_addr =
1244 ioport->ctl_addr = mmio + 0x20;
1245 }
1246
1247 static int nv_adma_host_init(struct ata_host *host)
1248 {
1249 struct pci_dev *pdev = to_pci_dev(host->dev);
1250 unsigned int i;
1251 u32 tmp32;
1252
1253 VPRINTK("ENTER\n");
1254
1255 /* enable ADMA on the ports */
1256 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1257 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1258 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1259 NV_MCP_SATA_CFG_20_PORT1_EN |
1260 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1261
1262 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1263
1264 for (i = 0; i < host->n_ports; i++)
1265 nv_adma_setup_port(host->ports[i]);
1266
1267 return 0;
1268 }
1269
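/* Build one ADMA PRD entry from a scatterlist segment, setting the WRITE,
   END and CONT flags as appropriate. */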
1270 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1271 struct scatterlist *sg,
1272 int idx,
1273 struct nv_adma_prd *aprd)
1274 {
1275 u8 flags = 0;
1276 if (qc->tf.flags & ATA_TFLAG_WRITE)
1277 flags |= NV_APRD_WRITE;
1278 if (idx == qc->n_elem - 1)
1279 flags |= NV_APRD_END;
1280 else if (idx != 4)
1281 flags |= NV_APRD_CONT;
1282
1283 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1284 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1285 aprd->flags = flags;
1286 aprd->packet_len = 0;
1287 }
1288
1289 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1290 {
1291 struct nv_adma_port_priv *pp = qc->ap->private_data;
1292 struct nv_adma_prd *aprd;
1293 struct scatterlist *sg;
1294 unsigned int si;
1295
1296 VPRINTK("ENTER\n");
1297
1298 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1299 aprd = (si < 5) ? &cpb->aprd[si] :
1300 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1301 nv_adma_fill_aprd(qc, sg, si, aprd);
1302 }
1303 if (si > 5)
1304 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1305 else
1306 cpb->next_aprd = cpu_to_le64(0);
1307 }
1308
1309 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1310 {
1311 struct nv_adma_port_priv *pp = qc->ap->private_data;
1312
1313 /* ADMA engine can only be used for non-ATAPI DMA commands,
1314 or interrupt-driven no-data commands. */
1315 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1316 (qc->tf.flags & ATA_TFLAG_POLLING))
1317 return 1;
1318
1319 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1320 (qc->tf.protocol == ATA_PROT_NODATA))
1321 return 0;
1322
1323 return 1;
1324 }
1325
1326 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1327 {
1328 struct nv_adma_port_priv *pp = qc->ap->private_data;
1329 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1330 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1331 NV_CPB_CTL_IEN;
1332
1333 if (nv_adma_use_reg_mode(qc)) {
1334 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1335 (qc->flags & ATA_QCFLAG_DMAMAP));
1336 nv_adma_register_mode(qc->ap);
1337 ata_sff_qc_prep(qc);
1338 return;
1339 }
1340
1341 cpb->resp_flags = NV_CPB_RESP_DONE;
1342 wmb();
1343 cpb->ctl_flags = 0;
1344 wmb();
1345
1346 cpb->len = 3;
1347 cpb->tag = qc->tag;
1348 cpb->next_cpb_idx = 0;
1349
1350 /* turn on NCQ flags for NCQ commands */
1351 if (qc->tf.protocol == ATA_PROT_NCQ)
1352 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1353
1354 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1355
1356 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1357
1358 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1359 nv_adma_fill_sg(qc, cpb);
1360 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1361 } else
1362 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1363
1364 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1365 until we are finished filling in all of the contents */
1366 wmb();
1367 cpb->ctl_flags = ctl_flags;
1368 wmb();
1369 cpb->resp_flags = 0;
1370 }
1371
1372 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1373 {
1374 struct nv_adma_port_priv *pp = qc->ap->private_data;
1375 void __iomem *mmio = pp->ctl_block;
1376 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1377
1378 VPRINTK("ENTER\n");
1379
1380 /* We can't handle result taskfile with NCQ commands, since
1381 retrieving the taskfile switches us out of ADMA mode and would abort
1382 existing commands. */
1383 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1384 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1385 ata_dev_printk(qc->dev, KERN_ERR,
1386 "NCQ w/ RESULT_TF not allowed\n");
1387 return AC_ERR_SYSTEM;
1388 }
1389
1390 if (nv_adma_use_reg_mode(qc)) {
1391 /* use ATA register mode */
1392 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1393 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1394 (qc->flags & ATA_QCFLAG_DMAMAP));
1395 nv_adma_register_mode(qc->ap);
1396 return ata_sff_qc_issue(qc);
1397 } else
1398 nv_adma_mode(qc->ap);
1399
1400 /* write append register, command tag in lower 8 bits
1401 and (number of cpbs to append -1) in top 8 bits */
1402 wmb();
1403
1404 if (curr_ncq != pp->last_issue_ncq) {
1405 /* Seems to need some delay before switching between NCQ and
1406 non-NCQ commands, else we get command timeouts and such. */
1407 udelay(20);
1408 pp->last_issue_ncq = curr_ncq;
1409 }
1410
1411 writew(qc->tag, mmio + NV_ADMA_APPEND);
1412
1413 DPRINTK("Issued tag %u\n", qc->tag);
1414
1415 return 0;
1416 }
1417
1418 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1419 {
1420 struct ata_host *host = dev_instance;
1421 unsigned int i;
1422 unsigned int handled = 0;
1423 unsigned long flags;
1424
1425 spin_lock_irqsave(&host->lock, flags);
1426
1427 for (i = 0; i < host->n_ports; i++) {
1428 struct ata_port *ap;
1429
1430 ap = host->ports[i];
1431 if (ap &&
1432 !(ap->flags & ATA_FLAG_DISABLED)) {
1433 struct ata_queued_cmd *qc;
1434
1435 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1436 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1437 handled += ata_sff_host_intr(ap, qc);
1438 else
1439 // No request pending? Clear interrupt status
1440 // anyway, in case there's one pending.
1441 ap->ops->sff_check_status(ap);
1442 }
1443
1444 }
1445
1446 spin_unlock_irqrestore(&host->lock, flags);
1447
1448 return IRQ_RETVAL(handled);
1449 }
1450
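/* Fan the combined interrupt status byte out to each port; every port owns
   NV_INT_PORT_SHIFT bits of irq_stat. */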
1451 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1452 {
1453 int i, handled = 0;
1454
1455 for (i = 0; i < host->n_ports; i++) {
1456 struct ata_port *ap = host->ports[i];
1457
1458 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1459 handled += nv_host_intr(ap, irq_stat);
1460
1461 irq_stat >>= NV_INT_PORT_SHIFT;
1462 }
1463
1464 return IRQ_RETVAL(handled);
1465 }
1466
1467 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1468 {
1469 struct ata_host *host = dev_instance;
1470 u8 irq_stat;
1471 irqreturn_t ret;
1472
1473 spin_lock(&host->lock);
1474 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1475 ret = nv_do_interrupt(host, irq_stat);
1476 spin_unlock(&host->lock);
1477
1478 return ret;
1479 }
1480
1481 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1482 {
1483 struct ata_host *host = dev_instance;
1484 u8 irq_stat;
1485 irqreturn_t ret;
1486
1487 spin_lock(&host->lock);
1488 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1489 ret = nv_do_interrupt(host, irq_stat);
1490 spin_unlock(&host->lock);
1491
1492 return ret;
1493 }
1494
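/* The SCR (SStatus/SError/SControl) registers are memory-mapped at scr_addr,
   4 bytes per register. */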
1495 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1496 {
1497 if (sc_reg > SCR_CONTROL)
1498 return -EINVAL;
1499
1500 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1501 return 0;
1502 }
1503
1504 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1505 {
1506 if (sc_reg > SCR_CONTROL)
1507 return -EINVAL;
1508
1509 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1510 return 0;
1511 }
1512
1513 static void nv_nf2_freeze(struct ata_port *ap)
1514 {
1515 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1516 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1517 u8 mask;
1518
1519 mask = ioread8(scr_addr + NV_INT_ENABLE);
1520 mask &= ~(NV_INT_ALL << shift);
1521 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1522 }
1523
1524 static void nv_nf2_thaw(struct ata_port *ap)
1525 {
1526 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1527 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1528 u8 mask;
1529
1530 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1531
1532 mask = ioread8(scr_addr + NV_INT_ENABLE);
1533 mask |= (NV_INT_MASK << shift);
1534 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1535 }
1536
1537 static void nv_ck804_freeze(struct ata_port *ap)
1538 {
1539 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1540 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1541 u8 mask;
1542
1543 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1544 mask &= ~(NV_INT_ALL << shift);
1545 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1546 }
1547
1548 static void nv_ck804_thaw(struct ata_port *ap)
1549 {
1550 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1551 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1552 u8 mask;
1553
1554 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1555
1556 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1557 mask |= (NV_INT_MASK << shift);
1558 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1559 }
1560
1561 static void nv_mcp55_freeze(struct ata_port *ap)
1562 {
1563 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1564 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1565 u32 mask;
1566
1567 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1568
1569 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1570 mask &= ~(NV_INT_ALL_MCP55 << shift);
1571 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1572 ata_sff_freeze(ap);
1573 }
1574
1575 static void nv_mcp55_thaw(struct ata_port *ap)
1576 {
1577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1579 u32 mask;
1580
1581 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1582
1583 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1584 mask |= (NV_INT_MASK_MCP55 << shift);
1585 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1586 ata_sff_thaw(ap);
1587 }
1588
1589 static void nv_adma_error_handler(struct ata_port *ap)
1590 {
1591 struct nv_adma_port_priv *pp = ap->private_data;
1592 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1593 void __iomem *mmio = pp->ctl_block;
1594 int i;
1595 u16 tmp;
1596
1597 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1598 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1599 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1600 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1601 u32 status = readw(mmio + NV_ADMA_STAT);
1602 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1603 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1604
1605 ata_port_printk(ap, KERN_ERR,
1606 "EH in ADMA mode, notifier 0x%X "
1607 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1608 "next cpb count 0x%X next cpb idx 0x%x\n",
1609 notifier, notifier_error, gen_ctl, status,
1610 cpb_count, next_cpb_idx);
1611
1612 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1613 struct nv_adma_cpb *cpb = &pp->cpb[i];
1614 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1615 ap->link.sactive & (1 << i))
1616 ata_port_printk(ap, KERN_ERR,
1617 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1618 i, cpb->ctl_flags, cpb->resp_flags);
1619 }
1620 }
1621
1622 /* Push us back into port register mode for error handling. */
1623 nv_adma_register_mode(ap);
1624
1625 /* Mark all of the CPBs as invalid to prevent them from
1626 being executed */
1627 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1628 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1629
1630 /* clear CPB fetch count */
1631 writew(0, mmio + NV_ADMA_CPB_COUNT);
1632
1633 /* Reset channel */
1634 tmp = readw(mmio + NV_ADMA_CTL);
1635 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1636 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1637 udelay(1);
1638 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1639 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1640 }
1641
1642 ata_sff_error_handler(ap);
1643 }
1644
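/* Append a deferred command to the port's FIFO defer queue;
   nv_swncq_qc_from_dq() below pops it again once it can be issued. */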
1645 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1646 {
1647 struct nv_swncq_port_priv *pp = ap->private_data;
1648 struct defer_queue *dq = &pp->defer_queue;
1649
1650 /* queue is full */
1651 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1652 dq->defer_bits |= (1 << qc->tag);
1653 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1654 }
1655
1656 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1657 {
1658 struct nv_swncq_port_priv *pp = ap->private_data;
1659 struct defer_queue *dq = &pp->defer_queue;
1660 unsigned int tag;
1661
1662 if (dq->head == dq->tail) /* null queue */
1663 return NULL;
1664
1665 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1666 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1667 WARN_ON(!(dq->defer_bits & (1 << tag)));
1668 dq->defer_bits &= ~(1 << tag);
1669
1670 return ata_qc_from_tag(ap, tag);
1671 }
1672
1673 static void nv_swncq_fis_reinit(struct ata_port *ap)
1674 {
1675 struct nv_swncq_port_priv *pp = ap->private_data;
1676
1677 pp->dhfis_bits = 0;
1678 pp->dmafis_bits = 0;
1679 pp->sdbfis_bits = 0;
1680 pp->ncq_flags = 0;
1681 }
1682
1683 static void nv_swncq_pp_reinit(struct ata_port *ap)
1684 {
1685 struct nv_swncq_port_priv *pp = ap->private_data;
1686 struct defer_queue *dq = &pp->defer_queue;
1687
1688 dq->head = 0;
1689 dq->tail = 0;
1690 dq->defer_bits = 0;
1691 pp->qc_active = 0;
1692 pp->last_issue_tag = ATA_TAG_POISON;
1693 nv_swncq_fis_reinit(ap);
1694 }
1695
1696 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1697 {
1698 struct nv_swncq_port_priv *pp = ap->private_data;
1699
1700 writew(fis, pp->irq_block);
1701 }
1702
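/* Stop the BMDMA engine; ata_bmdma_stop() only needs a qc to find the port,
   so fake one up on the stack. */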
1703 static void __ata_bmdma_stop(struct ata_port *ap)
1704 {
1705 struct ata_queued_cmd qc;
1706
1707 qc.ap = ap;
1708 ata_bmdma_stop(&qc);
1709 }
1710
1711 static void nv_swncq_ncq_stop(struct ata_port *ap)
1712 {
1713 struct nv_swncq_port_priv *pp = ap->private_data;
1714 unsigned int i;
1715 u32 sactive;
1716 u32 done_mask;
1717
1718 ata_port_printk(ap, KERN_ERR,
1719 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1720 ap->qc_active, ap->link.sactive);
1721 ata_port_printk(ap, KERN_ERR,
1722 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1723 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1724 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1725 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1726
1727 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1728 ap->ops->sff_check_status(ap),
1729 ioread8(ap->ioaddr.error_addr));
1730
1731 sactive = readl(pp->sactive_block);
1732 done_mask = pp->qc_active ^ sactive;
1733
1734 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1735 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1736 u8 err = 0;
1737 if (pp->qc_active & (1 << i))
1738 err = 0;
1739 else if (done_mask & (1 << i))
1740 err = 1;
1741 else
1742 continue;
1743
1744 ata_port_printk(ap, KERN_ERR,
1745 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1746 (pp->dhfis_bits >> i) & 0x1,
1747 (pp->dmafis_bits >> i) & 0x1,
1748 (pp->sdbfis_bits >> i) & 0x1,
1749 (sactive >> i) & 0x1,
1750 (err ? "error! tag doesn't exist" : " "));
1751 }
1752
1753 nv_swncq_pp_reinit(ap);
1754 ap->ops->sff_irq_clear(ap);
1755 __ata_bmdma_stop(ap);
1756 nv_swncq_irq_clear(ap, 0xffff);
1757 }
1758
1759 static void nv_swncq_error_handler(struct ata_port *ap)
1760 {
1761 struct ata_eh_context *ehc = &ap->link.eh_context;
1762
1763 if (ap->link.sactive) {
1764 nv_swncq_ncq_stop(ap);
1765 ehc->i.action |= ATA_EH_RESET;
1766 }
1767
1768 ata_sff_error_handler(ap);
1769 }
1770
1771 #ifdef CONFIG_PM
1772 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1773 {
1774 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1775 u32 tmp;
1776
1777 /* clear irq */
1778 writel(~0, mmio + NV_INT_STATUS_MCP55);
1779
1780 /* disable irq */
1781 writel(0, mmio + NV_INT_ENABLE_MCP55);
1782
1783 /* disable swncq */
1784 tmp = readl(mmio + NV_CTL_MCP55);
1785 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1786 writel(tmp, mmio + NV_CTL_MCP55);
1787
1788 return 0;
1789 }
1790
1791 static int nv_swncq_port_resume(struct ata_port *ap)
1792 {
1793 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1794 u32 tmp;
1795
1796 /* clear irq */
1797 writel(~0, mmio + NV_INT_STATUS_MCP55);
1798
1799 /* enable irq */
1800 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1801
1802 /* enable swncq */
1803 tmp = readl(mmio + NV_CTL_MCP55);
1804 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1805
1806 return 0;
1807 }
1808 #endif
1809
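/* One-time controller setup for SWNCQ: clear the ECO 398 bit in PCI config
   space, enable SWNCQ on both ports, and unmask the MCP55 interrupt sources. */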
1810 static void nv_swncq_host_init(struct ata_host *host)
1811 {
1812 u32 tmp;
1813 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1814 struct pci_dev *pdev = to_pci_dev(host->dev);
1815 u8 regval;
1816
1817 /* disable ECO 398 */
1818 pci_read_config_byte(pdev, 0x7f, &regval);
1819 regval &= ~(1 << 7);
1820 pci_write_config_byte(pdev, 0x7f, regval);
1821
1822 /* enable swncq */
1823 tmp = readl(mmio + NV_CTL_MCP55);
1824 VPRINTK("HOST_CTL:0x%X\n", tmp);
1825 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1826
1827 /* enable irq intr */
1828 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1829 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1830 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1831
1832 /* clear port irq */
1833 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1834 }
1835
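/*
 * Per-device configuration hook: on MCP51, and on MCP55 revisions at
 * or below 0xa2, reduce the queue depth to 1 for Maxtor drives,
 * effectively disabling SWNCQ for those combinations.
 */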
1836 static int nv_swncq_slave_config(struct scsi_device *sdev)
1837 {
1838 struct ata_port *ap = ata_shost_to_port(sdev->host);
1839 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1840 struct ata_device *dev;
1841 int rc;
1842 u8 rev;
1843 u8 check_maxtor = 0;
1844 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1845
1846 rc = ata_scsi_slave_config(sdev);
1847 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1848 /* Not a proper libata device, ignore */
1849 return rc;
1850
1851 dev = &ap->link.device[sdev->id];
1852 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1853 return rc;
1854
1855 /* if MCP51 and Maxtor, then disable ncq */
1856 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1857 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1858 check_maxtor = 1;
1859
1860 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1861 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1862 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1863 pci_read_config_byte(pdev, 0x8, &rev);
1864 if (rev <= 0xa2)
1865 check_maxtor = 1;
1866 }
1867
1868 if (!check_maxtor)
1869 return rc;
1870
1871 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1872
1873 if (strncmp(model_num, "Maxtor", 6) == 0) {
1874 ata_scsi_change_queue_depth(sdev, 1);
1875 ata_dev_printk(dev, KERN_NOTICE,
1876 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1877 }
1878
1879 return rc;
1880 }
1881
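/*
 * Allocate the per-port SWNCQ state plus one PRD table per queue tag,
 * and record the MMIO addresses of the SActive, interrupt status and
 * tag blocks.
 */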
1882 static int nv_swncq_port_start(struct ata_port *ap)
1883 {
1884 struct device *dev = ap->host->dev;
1885 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1886 struct nv_swncq_port_priv *pp;
1887 int rc;
1888
1889 rc = ata_port_start(ap);
1890 if (rc)
1891 return rc;
1892
1893 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1894 if (!pp)
1895 return -ENOMEM;
1896
1897 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1898 &pp->prd_dma, GFP_KERNEL);
1899 if (!pp->prd)
1900 return -ENOMEM;
1901 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1902
1903 ap->private_data = pp;
1904 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1905 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1906 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1907
1908 return 0;
1909 }
1910
1911 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1912 {
1913 if (qc->tf.protocol != ATA_PROT_NCQ) {
1914 ata_sff_qc_prep(qc);
1915 return;
1916 }
1917
1918 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1919 return;
1920
1921 nv_swncq_fill_sg(qc);
1922 }
1923
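/*
 * Build the PRD table for @qc in the slot reserved for its tag,
 * splitting scatterlist entries so that no PRD crosses a 64K boundary,
 * and mark the final entry with ATA_PRD_EOT.
 */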
1924 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1925 {
1926 struct ata_port *ap = qc->ap;
1927 struct scatterlist *sg;
1928 struct nv_swncq_port_priv *pp = ap->private_data;
1929 struct ata_prd *prd;
1930 unsigned int si, idx;
1931
1932 prd = pp->prd + ATA_MAX_PRD * qc->tag;
1933
1934 idx = 0;
1935 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1936 u32 addr, offset;
1937 u32 sg_len, len;
1938
1939 addr = (u32)sg_dma_address(sg);
1940 sg_len = sg_dma_len(sg);
1941
1942 while (sg_len) {
1943 offset = addr & 0xffff;
1944 len = sg_len;
1945 if ((offset + sg_len) > 0x10000)
1946 len = 0x10000 - offset;
1947
1948 prd[idx].addr = cpu_to_le32(addr);
1949 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1950
1951 idx++;
1952 sg_len -= len;
1953 addr += len;
1954 }
1955 }
1956
1957 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1958 }
1959
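/*
 * Issue @qc to the device: set its bit in SActive, update the tag
 * bookkeeping, then load and execute the taskfile.
 */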
1960 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1961 struct ata_queued_cmd *qc)
1962 {
1963 struct nv_swncq_port_priv *pp = ap->private_data;
1964
1965 if (qc == NULL)
1966 return 0;
1967
1968 DPRINTK("Enter\n");
1969
1970 writel((1 << qc->tag), pp->sactive_block);
1971 pp->last_issue_tag = qc->tag;
1972 pp->dhfis_bits &= ~(1 << qc->tag);
1973 pp->dmafis_bits &= ~(1 << qc->tag);
1974 pp->qc_active |= (0x1 << qc->tag);
1975
1976 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1977 ap->ops->sff_exec_command(ap, &qc->tf);
1978
1979 DPRINTK("Issued tag %u\n", qc->tag);
1980
1981 return 0;
1982 }
1983
1984 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
1985 {
1986 struct ata_port *ap = qc->ap;
1987 struct nv_swncq_port_priv *pp = ap->private_data;
1988
1989 if (qc->tf.protocol != ATA_PROT_NCQ)
1990 return ata_sff_qc_issue(qc);
1991
1992 DPRINTK("Enter\n");
1993
1994 if (!pp->qc_active)
1995 nv_swncq_issue_atacmd(ap, qc);
1996 else
1997 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
1998
1999 return 0;
2000 }
2001
2002 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2003 {
2004 u32 serror;
2005 struct ata_eh_info *ehi = &ap->link.eh_info;
2006
2007 ata_ehi_clear_desc(ehi);
2008
2009 /* AHCI needs SError cleared; otherwise, it might lock up */
2010 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2011 sata_scr_write(&ap->link, SCR_ERROR, serror);
2012
2013 /* analyze @irq_stat */
2014 if (fis & NV_SWNCQ_IRQ_ADDED)
2015 ata_ehi_push_desc(ehi, "hot plug");
2016 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2017 ata_ehi_push_desc(ehi, "hot unplug");
2018
2019 ata_ehi_hotplugged(ehi);
2020
2021 /* okay, let's hand over to EH */
2022 ehi->serror |= serror;
2023
2024 ata_port_freeze(ap);
2025 }
2026
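/*
 * Handle a Set Device Bits FIS: check for BMDMA errors, complete every
 * tag that has left SActive, and either reissue the last command (if
 * its D2H register FIS never arrived) or start the next deferred one.
 * Returns the number of completed commands, or a negative error code.
 */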
2027 static int nv_swncq_sdbfis(struct ata_port *ap)
2028 {
2029 struct ata_queued_cmd *qc;
2030 struct nv_swncq_port_priv *pp = ap->private_data;
2031 struct ata_eh_info *ehi = &ap->link.eh_info;
2032 u32 sactive;
2033 int nr_done = 0;
2034 u32 done_mask;
2035 int i;
2036 u8 host_stat;
2037 u8 lack_dhfis = 0;
2038
2039 host_stat = ap->ops->bmdma_status(ap);
2040 if (unlikely(host_stat & ATA_DMA_ERR)) {
2041 /* error when transferring data to/from memory */
2042 ata_ehi_clear_desc(ehi);
2043 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2044 ehi->err_mask |= AC_ERR_HOST_BUS;
2045 ehi->action |= ATA_EH_RESET;
2046 return -EINVAL;
2047 }
2048
2049 ap->ops->sff_irq_clear(ap);
2050 __ata_bmdma_stop(ap);
2051
2052 sactive = readl(pp->sactive_block);
2053 done_mask = pp->qc_active ^ sactive;
2054
2055 if (unlikely(done_mask & sactive)) {
2056 ata_ehi_clear_desc(ehi);
2057 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2058 "(%08x->%08x)", pp->qc_active, sactive);
2059 ehi->err_mask |= AC_ERR_HSM;
2060 ehi->action |= ATA_EH_RESET;
2061 return -EINVAL;
2062 }
2063 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2064 if (!(done_mask & (1 << i)))
2065 continue;
2066
2067 qc = ata_qc_from_tag(ap, i);
2068 if (qc) {
2069 ata_qc_complete(qc);
2070 pp->qc_active &= ~(1 << i);
2071 pp->dhfis_bits &= ~(1 << i);
2072 pp->dmafis_bits &= ~(1 << i);
2073 pp->sdbfis_bits |= (1 << i);
2074 nr_done++;
2075 }
2076 }
2077
2078 if (!ap->qc_active) {
2079 DPRINTK("over\n");
2080 nv_swncq_pp_reinit(ap);
2081 return nr_done;
2082 }
2083
2084 if (pp->qc_active & pp->dhfis_bits)
2085 return nr_done;
2086
2087 if ((pp->ncq_flags & ncq_saw_backout) ||
2088 (pp->qc_active ^ pp->dhfis_bits))
2089 /* if the controller can't get a device to host register FIS,
2090 * the driver needs to reissue the command.
2091 */
2092 lack_dhfis = 1;
2093
2094 DPRINTK("id 0x%x QC: qc_active 0x%x, "
2095 "SWNCQ:qc_active 0x%X defer_bits %X "
2096 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2097 ap->print_id, ap->qc_active, pp->qc_active,
2098 pp->defer_queue.defer_bits, pp->dhfis_bits,
2099 pp->dmafis_bits, pp->last_issue_tag);
2100
2101 nv_swncq_fis_reinit(ap);
2102
2103 if (lack_dhfis) {
2104 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2105 nv_swncq_issue_atacmd(ap, qc);
2106 return nr_done;
2107 }
2108
2109 if (pp->defer_queue.defer_bits) {
2110 /* send deferral queue command */
2111 qc = nv_swncq_qc_from_dq(ap);
2112 WARN_ON(qc == NULL);
2113 nv_swncq_issue_atacmd(ap, qc);
2114 }
2115
2116 return nr_done;
2117 }
2118
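/* Read the currently selected NCQ tag (bits 6:2 of the tag block). */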
2119 static inline u32 nv_swncq_tag(struct ata_port *ap)
2120 {
2121 struct nv_swncq_port_priv *pp = ap->private_data;
2122 u32 tag;
2123
2124 tag = readb(pp->tag_block) >> 2;
2125 return (tag & 0x1f);
2126 }
2127
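/*
 * Respond to a DMA Setup FIS: point the BMDMA engine at the PRD table
 * for the selected tag, set the transfer direction and start the DMA.
 */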
2128 static int nv_swncq_dmafis(struct ata_port *ap)
2129 {
2130 struct ata_queued_cmd *qc;
2131 unsigned int rw;
2132 u8 dmactl;
2133 u32 tag;
2134 struct nv_swncq_port_priv *pp = ap->private_data;
2135
2136 __ata_bmdma_stop(ap);
2137 tag = nv_swncq_tag(ap);
2138
2139 DPRINTK("dma setup tag 0x%x\n", tag);
2140 qc = ata_qc_from_tag(ap, tag);
2141
2142 if (unlikely(!qc))
2143 return 0;
2144
2145 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2146
2147 /* load PRD table addr. */
2148 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2149 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2150
2151 /* specify data direction, triple-check start bit is clear */
2152 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2153 dmactl &= ~ATA_DMA_WR;
2154 if (!rw)
2155 dmactl |= ATA_DMA_WR;
2156
2157 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2158
2159 return 1;
2160 }
2161
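/*
 * Per-port SWNCQ interrupt handler: acknowledge the FIS bits, deal
 * with hotplug and device errors, then process BACKOUT, SDB, D2H
 * register and DMA Setup notifications in turn.
 */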
2162 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2163 {
2164 struct nv_swncq_port_priv *pp = ap->private_data;
2165 struct ata_queued_cmd *qc;
2166 struct ata_eh_info *ehi = &ap->link.eh_info;
2167 u32 serror;
2168 u8 ata_stat;
2169 int rc = 0;
2170
2171 ata_stat = ap->ops->sff_check_status(ap);
2172 nv_swncq_irq_clear(ap, fis);
2173 if (!fis)
2174 return;
2175
2176 if (ap->pflags & ATA_PFLAG_FROZEN)
2177 return;
2178
2179 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2180 nv_swncq_hotplug(ap, fis);
2181 return;
2182 }
2183
2184 if (!pp->qc_active)
2185 return;
2186
2187 if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2188 return;
2189 ap->ops->scr_write(ap, SCR_ERROR, serror);
2190
2191 if (ata_stat & ATA_ERR) {
2192 ata_ehi_clear_desc(ehi);
2193 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2194 ehi->err_mask |= AC_ERR_DEV;
2195 ehi->serror |= serror;
2196 ehi->action |= ATA_EH_RESET;
2197 ata_port_freeze(ap);
2198 return;
2199 }
2200
2201 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2202 /* A BACKOUT interrupt means the driver must issue
2203 * the command again some time later.
2204 */
2205 pp->ncq_flags |= ncq_saw_backout;
2206 }
2207
2208 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2209 pp->ncq_flags |= ncq_saw_sdb;
2210 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2211 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2212 ap->print_id, pp->qc_active, pp->dhfis_bits,
2213 pp->dmafis_bits, readl(pp->sactive_block));
2214 rc = nv_swncq_sdbfis(ap);
2215 if (rc < 0)
2216 goto irq_error;
2217 }
2218
2219 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2220 /* The interrupt indicates the new command
2221 * was transmitted correctly to the drive.
2222 */
2223 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2224 pp->ncq_flags |= ncq_saw_d2h;
2225 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2226 ata_ehi_push_desc(ehi, "illegal fis transaction");
2227 ehi->err_mask |= AC_ERR_HSM;
2228 ehi->action |= ATA_EH_RESET;
2229 goto irq_error;
2230 }
2231
2232 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2233 !(pp->ncq_flags & ncq_saw_dmas)) {
2234 ata_stat = ap->ops->sff_check_status(ap);
2235 if (ata_stat & ATA_BUSY)
2236 goto irq_exit;
2237
2238 if (pp->defer_queue.defer_bits) {
2239 DPRINTK("send next command\n");
2240 qc = nv_swncq_qc_from_dq(ap);
2241 nv_swncq_issue_atacmd(ap, qc);
2242 }
2243 }
2244 }
2245
2246 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2247 /* program the dma controller with appropriate PRD buffers
2248 * and start the DMA transfer for the requested command.
2249 */
2250 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2251 pp->ncq_flags |= ncq_saw_dmas;
2252 rc = nv_swncq_dmafis(ap);
2253 }
2254
2255 irq_exit:
2256 return;
2257 irq_error:
2258 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2259 ata_port_freeze(ap);
2260 return;
2261 }
2262
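/*
 * Top-level MCP55 interrupt handler: read the shared status word once
 * and dispatch to each port, taking the SWNCQ path while NCQ commands
 * are outstanding and the legacy path otherwise.
 */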
2263 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2264 {
2265 struct ata_host *host = dev_instance;
2266 unsigned int i;
2267 unsigned int handled = 0;
2268 unsigned long flags;
2269 u32 irq_stat;
2270
2271 spin_lock_irqsave(&host->lock, flags);
2272
2273 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2274
2275 for (i = 0; i < host->n_ports; i++) {
2276 struct ata_port *ap = host->ports[i];
2277
2278 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2279 if (ap->link.sactive) {
2280 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2281 handled = 1;
2282 } else {
2283 if (irq_stat) /* preserve hotplug bits */
2284 nv_swncq_irq_clear(ap, 0xfff0);
2285
2286 handled += nv_host_intr(ap, (u8)irq_stat);
2287 }
2288 }
2289 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2290 }
2291
2292 spin_unlock_irqrestore(&host->lock, flags);
2293
2294 return IRQ_RETVAL(handled);
2295 }
2296
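/*
 * PCI probe: verify this is the SATA function (six BARs), pick the
 * programming interface (generic, ADMA or SWNCQ) from the device table
 * and module parameters, map BAR5 for SCR and controller registers,
 * then activate the host.
 */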
2297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2298 {
2299 static int printed_version;
2300 const struct ata_port_info *ppi[] = { NULL, NULL };
2301 struct nv_pi_priv *ipriv;
2302 struct ata_host *host;
2303 struct nv_host_priv *hpriv;
2304 int rc;
2305 u32 bar;
2306 void __iomem *base;
2307 unsigned long type = ent->driver_data;
2308
2309 // Make sure this is a SATA controller by counting the number of bars
2310 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2311 // it's an IDE controller and we ignore it.
2312 for (bar = 0; bar < 6; bar++)
2313 if (pci_resource_start(pdev, bar) == 0)
2314 return -ENODEV;
2315
2316 if (!printed_version++)
2317 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2318
2319 rc = pcim_enable_device(pdev);
2320 if (rc)
2321 return rc;
2322
2323 /* determine type and allocate host */
2324 if (type == CK804 && adma_enabled) {
2325 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2326 type = ADMA;
2327 }
2328
2329 if (type == SWNCQ) {
2330 if (swncq_enabled)
2331 dev_printk(KERN_NOTICE, &pdev->dev,
2332 "Using SWNCQ mode\n");
2333 else
2334 type = GENERIC;
2335 }
2336
2337 ppi[0] = &nv_port_info[type];
2338 ipriv = ppi[0]->private_data;
2339 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2340 if (rc)
2341 return rc;
2342
2343 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2344 if (!hpriv)
2345 return -ENOMEM;
2346 hpriv->type = type;
2347 host->private_data = hpriv;
2348
2349 /* request and iomap NV_MMIO_BAR */
2350 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2351 if (rc)
2352 return rc;
2353
2354 /* configure SCR access */
2355 base = host->iomap[NV_MMIO_BAR];
2356 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2357 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2358
2359 /* enable SATA space for CK804 */
2360 if (type >= CK804) {
2361 u8 regval;
2362
2363 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2364 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2365 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2366 }
2367
2368 /* init ADMA */
2369 if (type == ADMA) {
2370 rc = nv_adma_host_init(host);
2371 if (rc)
2372 return rc;
2373 } else if (type == SWNCQ)
2374 nv_swncq_host_init(host);
2375
2376 pci_set_master(pdev);
2377 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2378 IRQF_SHARED, ipriv->sht);
2379 }
2380
2381 #ifdef CONFIG_PM
2382 static int nv_pci_device_resume(struct pci_dev *pdev)
2383 {
2384 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2385 struct nv_host_priv *hpriv = host->private_data;
2386 int rc;
2387
2388 rc = ata_pci_device_do_resume(pdev);
2389 if (rc)
2390 return rc;
2391
2392 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2393 if (hpriv->type >= CK804) {
2394 u8 regval;
2395
2396 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2397 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2398 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2399 }
2400 if (hpriv->type == ADMA) {
2401 u32 tmp32;
2402 struct nv_adma_port_priv *pp;
2403 /* enable/disable ADMA on the ports appropriately */
2404 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2405
2406 pp = host->ports[0]->private_data;
2407 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2408 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2409 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2410 else
2411 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2412 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2413 pp = host->ports[1]->private_data;
2414 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2415 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2416 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2417 else
2418 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2419 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2420
2421 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2422 }
2423 }
2424
2425 ata_host_resume(host);
2426
2427 return 0;
2428 }
2429 #endif
2430
2431 static void nv_ck804_host_stop(struct ata_host *host)
2432 {
2433 struct pci_dev *pdev = to_pci_dev(host->dev);
2434 u8 regval;
2435
2436 /* disable SATA space for CK804 */
2437 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2438 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2439 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2440 }
2441
2442 static void nv_adma_host_stop(struct ata_host *host)
2443 {
2444 struct pci_dev *pdev = to_pci_dev(host->dev);
2445 u32 tmp32;
2446
2447 /* disable ADMA on the ports */
2448 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2449 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2450 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2451 NV_MCP_SATA_CFG_20_PORT1_EN |
2452 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2453
2454 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2455
2456 nv_ck804_host_stop(host);
2457 }
2458
2459 static int __init nv_init(void)
2460 {
2461 return pci_register_driver(&nv_pci_driver);
2462 }
2463
2464 static void __exit nv_exit(void)
2465 {
2466 pci_unregister_driver(&nv_pci_driver);
2467 }
2468
2469 module_init(nv_init);
2470 module_exit(nv_exit);
2471 module_param_named(adma, adma_enabled, bool, 0444);
2472 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2473 module_param_named(swncq, swncq_enabled, bool, 0444);
2474 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2475