powerpc: Minor cleanups of kernel virt address space definitions
[deliverable/linux.git] / drivers / ata / sata_nv.c
1 /*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.5"
53
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
55
56 enum {
57 NV_MMIO_BAR = 5,
58
59 NV_PORTS = 2,
60 NV_PIO_MASK = ATA_PIO4,
61 NV_MWDMA_MASK = ATA_MWDMA2,
62 NV_UDMA_MASK = ATA_UDMA6,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
65
66 /* INT_STATUS/ENABLE */
67 NV_INT_STATUS = 0x10,
68 NV_INT_ENABLE = 0x11,
69 NV_INT_STATUS_CK804 = 0x440,
70 NV_INT_ENABLE_CK804 = 0x441,
71
72 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
80 NV_INT_ALL = 0x0f,
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
83
84 /* INT_CONFIG */
85 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
87
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
171
172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
177
178 /* MCP55 */
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
182
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
186
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
192
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
197
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
200
201 };
202
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 __le64 addr;
206 __le32 len;
207 u8 flags;
208 u8 packet_len;
209 __le16 reserved;
210 };
211
212 enum nv_adma_regbits {
213 CMDEND = (1 << 15), /* end of command list */
214 WNB = (1 << 14), /* wait-not-BSY */
215 IGN = (1 << 13), /* ignore this entry */
216 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
217 DA2 = (1 << (2 + 8)),
218 DA1 = (1 << (1 + 8)),
219 DA0 = (1 << (0 + 8)),
220 };
221
222 /* ADMA Command Parameter Block
223 The first 5 SG segments are stored inside the Command Parameter Block itself.
224 If there are more than 5 segments the remainder are stored in a separate
225 memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 u8 resp_flags; /* 0 */
228 u8 reserved1; /* 1 */
229 u8 ctl_flags; /* 2 */
230 /* len is length of taskfile in 64 bit words */
231 u8 len; /* 3 */
232 u8 tag; /* 4 */
233 u8 next_cpb_idx; /* 5 */
234 __le16 reserved2; /* 6-7 */
235 __le16 tf[12]; /* 8-31 */
236 struct nv_adma_prd aprd[5]; /* 32-111 */
237 __le64 next_aprd; /* 112-119 */
238 __le64 reserved3; /* 120-127 */
239 };
240
241
242 struct nv_adma_port_priv {
243 struct nv_adma_cpb *cpb;
244 dma_addr_t cpb_dma;
245 struct nv_adma_prd *aprd;
246 dma_addr_t aprd_dma;
247 void __iomem *ctl_block;
248 void __iomem *gen_block;
249 void __iomem *notifier_clear_block;
250 u64 adma_dma_mask;
251 u8 flags;
252 int last_issue_ncq;
253 };
254
255 struct nv_host_priv {
256 unsigned long type;
257 };
258
259 struct defer_queue {
260 u32 defer_bits;
261 unsigned int head;
262 unsigned int tail;
263 unsigned int tag[ATA_MAX_QUEUE];
264 };
265
266 enum ncq_saw_flag_list {
267 ncq_saw_d2h = (1U << 0),
268 ncq_saw_dmas = (1U << 1),
269 ncq_saw_sdb = (1U << 2),
270 ncq_saw_backout = (1U << 3),
271 };
272
273 struct nv_swncq_port_priv {
274 struct ata_prd *prd; /* our SG list */
275 dma_addr_t prd_dma; /* and its DMA mapping */
276 void __iomem *sactive_block;
277 void __iomem *irq_block;
278 void __iomem *tag_block;
279 u32 qc_active;
280
281 unsigned int last_issue_tag;
282
283 /* fifo circular queue to store deferral command */
284 struct defer_queue defer_queue;
285
286 /* for NCQ interrupt analysis */
287 u32 dhfis_bits;
288 u32 dmafis_bits;
289 u32 sdbfis_bits;
290
291 unsigned int ncq_flags;
292 };
293
294
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307
308 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
309 unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347
348 enum nv_host_type
349 {
350 GENERIC,
351 NFORCE2,
352 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
353 CK804,
354 ADMA,
355 MCP5x,
356 SWNCQ,
357 };
358
359 static const struct pci_device_id nv_pci_tbl[] = {
360 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374
375 { } /* terminate list */
376 };
377
378 static struct pci_driver nv_pci_driver = {
379 .name = DRV_NAME,
380 .id_table = nv_pci_tbl,
381 .probe = nv_init_one,
382 #ifdef CONFIG_PM
383 .suspend = ata_pci_device_suspend,
384 .resume = nv_pci_device_resume,
385 #endif
386 .remove = ata_pci_remove_one,
387 };
388
389 static struct scsi_host_template nv_sht = {
390 ATA_BMDMA_SHT(DRV_NAME),
391 };
392
393 static struct scsi_host_template nv_adma_sht = {
394 ATA_NCQ_SHT(DRV_NAME),
395 .can_queue = NV_ADMA_MAX_CPBS,
396 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
397 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
398 .slave_configure = nv_adma_slave_config,
399 };
400
401 static struct scsi_host_template nv_swncq_sht = {
402 ATA_NCQ_SHT(DRV_NAME),
403 .can_queue = ATA_MAX_QUEUE,
404 .sg_tablesize = LIBATA_MAX_PRD,
405 .dma_boundary = ATA_DMA_BOUNDARY,
406 .slave_configure = nv_swncq_slave_config,
407 };
408
409 static struct ata_port_operations nv_common_ops = {
410 .inherits = &ata_bmdma_port_ops,
411 .lost_interrupt = ATA_OP_NULL,
412 .scr_read = nv_scr_read,
413 .scr_write = nv_scr_write,
414 };
415
416 /* OSDL bz11195 reports that link doesn't come online after hardreset
417 * on generic nv's and there have been several other similar reports
418 * on linux-ide. Disable hardreset for generic nv's.
419 */
420 static struct ata_port_operations nv_generic_ops = {
421 .inherits = &nv_common_ops,
422 .hardreset = ATA_OP_NULL,
423 };
424
425 /* nf2 is ripe with hardreset related problems.
426 *
427 * kernel bz#3352 reports nf2/3 controllers can't determine device
428 * signature reliably. The following thread reports detection failure
429 * on cold boot with the standard debouncing timing.
430 *
431 * http://thread.gmane.org/gmane.linux.ide/34098
432 *
433 * And bz#12176 reports that hardreset simply doesn't work on nf2.
434 * Give up on it and just don't do hardreset.
435 */
436 static struct ata_port_operations nv_nf2_ops = {
437 .inherits = &nv_generic_ops,
438 .freeze = nv_nf2_freeze,
439 .thaw = nv_nf2_thaw,
440 };
441
442 /* For initial probing after boot and hot plugging, hardreset mostly
443 * works fine on CK804 but curiously, reprobing on the initial port by
444 * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
445 * in somewhat undeterministic way. Use noclassify hardreset.
446 */
447 static struct ata_port_operations nv_ck804_ops = {
448 .inherits = &nv_common_ops,
449 .freeze = nv_ck804_freeze,
450 .thaw = nv_ck804_thaw,
451 .hardreset = nv_noclassify_hardreset,
452 .host_stop = nv_ck804_host_stop,
453 };
454
455 static struct ata_port_operations nv_adma_ops = {
456 .inherits = &nv_ck804_ops,
457
458 .check_atapi_dma = nv_adma_check_atapi_dma,
459 .sff_tf_read = nv_adma_tf_read,
460 .qc_defer = ata_std_qc_defer,
461 .qc_prep = nv_adma_qc_prep,
462 .qc_issue = nv_adma_qc_issue,
463 .sff_irq_clear = nv_adma_irq_clear,
464
465 .freeze = nv_adma_freeze,
466 .thaw = nv_adma_thaw,
467 .error_handler = nv_adma_error_handler,
468 .post_internal_cmd = nv_adma_post_internal_cmd,
469
470 .port_start = nv_adma_port_start,
471 .port_stop = nv_adma_port_stop,
472 #ifdef CONFIG_PM
473 .port_suspend = nv_adma_port_suspend,
474 .port_resume = nv_adma_port_resume,
475 #endif
476 .host_stop = nv_adma_host_stop,
477 };
478
479 /* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
480 * work, hardreset should be used and hardreset can't report proper
481 * signature, which suggests that mcp5x is closer to nf2 as long as
482 * reset quirkiness is concerned. Define separate ops for mcp5x with
483 * nv_noclassify_hardreset().
484 */
485 static struct ata_port_operations nv_mcp5x_ops = {
486 .inherits = &nv_common_ops,
487 .hardreset = nv_noclassify_hardreset,
488 };
489
490 static struct ata_port_operations nv_swncq_ops = {
491 .inherits = &nv_mcp5x_ops,
492
493 .qc_defer = ata_std_qc_defer,
494 .qc_prep = nv_swncq_qc_prep,
495 .qc_issue = nv_swncq_qc_issue,
496
497 .freeze = nv_mcp55_freeze,
498 .thaw = nv_mcp55_thaw,
499 .error_handler = nv_swncq_error_handler,
500
501 #ifdef CONFIG_PM
502 .port_suspend = nv_swncq_port_suspend,
503 .port_resume = nv_swncq_port_resume,
504 #endif
505 .port_start = nv_swncq_port_start,
506 };
507
508 struct nv_pi_priv {
509 irq_handler_t irq_handler;
510 struct scsi_host_template *sht;
511 };
512
513 #define NV_PI_PRIV(_irq_handler, _sht) \
514 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
515
516 static const struct ata_port_info nv_port_info[] = {
517 /* generic */
518 {
519 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
520 .pio_mask = NV_PIO_MASK,
521 .mwdma_mask = NV_MWDMA_MASK,
522 .udma_mask = NV_UDMA_MASK,
523 .port_ops = &nv_generic_ops,
524 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
525 },
526 /* nforce2/3 */
527 {
528 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
529 .pio_mask = NV_PIO_MASK,
530 .mwdma_mask = NV_MWDMA_MASK,
531 .udma_mask = NV_UDMA_MASK,
532 .port_ops = &nv_nf2_ops,
533 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
534 },
535 /* ck804 */
536 {
537 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
538 .pio_mask = NV_PIO_MASK,
539 .mwdma_mask = NV_MWDMA_MASK,
540 .udma_mask = NV_UDMA_MASK,
541 .port_ops = &nv_ck804_ops,
542 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
543 },
544 /* ADMA */
545 {
546 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
547 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
548 .pio_mask = NV_PIO_MASK,
549 .mwdma_mask = NV_MWDMA_MASK,
550 .udma_mask = NV_UDMA_MASK,
551 .port_ops = &nv_adma_ops,
552 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
553 },
554 /* MCP5x */
555 {
556 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
557 .pio_mask = NV_PIO_MASK,
558 .mwdma_mask = NV_MWDMA_MASK,
559 .udma_mask = NV_UDMA_MASK,
560 .port_ops = &nv_mcp5x_ops,
561 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
562 },
563 /* SWNCQ */
564 {
565 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
566 ATA_FLAG_NCQ,
567 .pio_mask = NV_PIO_MASK,
568 .mwdma_mask = NV_MWDMA_MASK,
569 .udma_mask = NV_UDMA_MASK,
570 .port_ops = &nv_swncq_ops,
571 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
572 },
573 };
574
575 MODULE_AUTHOR("NVIDIA");
576 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
577 MODULE_LICENSE("GPL");
578 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
579 MODULE_VERSION(DRV_VERSION);
580
581 static int adma_enabled;
582 static int swncq_enabled = 1;
583
584 static void nv_adma_register_mode(struct ata_port *ap)
585 {
586 struct nv_adma_port_priv *pp = ap->private_data;
587 void __iomem *mmio = pp->ctl_block;
588 u16 tmp, status;
589 int count = 0;
590
591 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
592 return;
593
594 status = readw(mmio + NV_ADMA_STAT);
595 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
596 ndelay(50);
597 status = readw(mmio + NV_ADMA_STAT);
598 count++;
599 }
600 if (count == 20)
601 ata_port_printk(ap, KERN_WARNING,
602 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
603 status);
604
605 tmp = readw(mmio + NV_ADMA_CTL);
606 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
607
608 count = 0;
609 status = readw(mmio + NV_ADMA_STAT);
610 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
611 ndelay(50);
612 status = readw(mmio + NV_ADMA_STAT);
613 count++;
614 }
615 if (count == 20)
616 ata_port_printk(ap, KERN_WARNING,
617 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
618 status);
619
620 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
621 }
622
623 static void nv_adma_mode(struct ata_port *ap)
624 {
625 struct nv_adma_port_priv *pp = ap->private_data;
626 void __iomem *mmio = pp->ctl_block;
627 u16 tmp, status;
628 int count = 0;
629
630 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
631 return;
632
633 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
634
635 tmp = readw(mmio + NV_ADMA_CTL);
636 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
637
638 status = readw(mmio + NV_ADMA_STAT);
639 while (((status & NV_ADMA_STAT_LEGACY) ||
640 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
641 ndelay(50);
642 status = readw(mmio + NV_ADMA_STAT);
643 count++;
644 }
645 if (count == 20)
646 ata_port_printk(ap, KERN_WARNING,
647 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
648 status);
649
650 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
651 }
652
653 static int nv_adma_slave_config(struct scsi_device *sdev)
654 {
655 struct ata_port *ap = ata_shost_to_port(sdev->host);
656 struct nv_adma_port_priv *pp = ap->private_data;
657 struct nv_adma_port_priv *port0, *port1;
658 struct scsi_device *sdev0, *sdev1;
659 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
660 unsigned long segment_boundary, flags;
661 unsigned short sg_tablesize;
662 int rc;
663 int adma_enable;
664 u32 current_reg, new_reg, config_mask;
665
666 rc = ata_scsi_slave_config(sdev);
667
668 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
669 /* Not a proper libata device, ignore */
670 return rc;
671
672 spin_lock_irqsave(ap->lock, flags);
673
674 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
675 /*
676 * NVIDIA reports that ADMA mode does not support ATAPI commands.
677 * Therefore ATAPI commands are sent through the legacy interface.
678 * However, the legacy interface only supports 32-bit DMA.
679 * Restrict DMA parameters as required by the legacy interface
680 * when an ATAPI device is connected.
681 */
682 segment_boundary = ATA_DMA_BOUNDARY;
683 /* Subtract 1 since an extra entry may be needed for padding, see
684 libata-scsi.c */
685 sg_tablesize = LIBATA_MAX_PRD - 1;
686
687 /* Since the legacy DMA engine is in use, we need to disable ADMA
688 on the port. */
689 adma_enable = 0;
690 nv_adma_register_mode(ap);
691 } else {
692 segment_boundary = NV_ADMA_DMA_BOUNDARY;
693 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
694 adma_enable = 1;
695 }
696
697 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
698
699 if (ap->port_no == 1)
700 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
701 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
702 else
703 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
704 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
705
706 if (adma_enable) {
707 new_reg = current_reg | config_mask;
708 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
709 } else {
710 new_reg = current_reg & ~config_mask;
711 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
712 }
713
714 if (current_reg != new_reg)
715 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
716
717 port0 = ap->host->ports[0]->private_data;
718 port1 = ap->host->ports[1]->private_data;
719 sdev0 = ap->host->ports[0]->link.device[0].sdev;
720 sdev1 = ap->host->ports[1]->link.device[0].sdev;
721 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
722 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
723 /** We have to set the DMA mask to 32-bit if either port is in
724 ATAPI mode, since they are on the same PCI device which is
725 used for DMA mapping. If we set the mask we also need to set
726 the bounce limit on both ports to ensure that the block
727 layer doesn't feed addresses that cause DMA mapping to
728 choke. If either SCSI device is not allocated yet, it's OK
729 since that port will discover its correct setting when it
730 does get allocated.
731 Note: Setting 32-bit mask should not fail. */
732 if (sdev0)
733 blk_queue_bounce_limit(sdev0->request_queue,
734 ATA_DMA_MASK);
735 if (sdev1)
736 blk_queue_bounce_limit(sdev1->request_queue,
737 ATA_DMA_MASK);
738
739 pci_set_dma_mask(pdev, ATA_DMA_MASK);
740 } else {
741 /** This shouldn't fail as it was set to this value before */
742 pci_set_dma_mask(pdev, pp->adma_dma_mask);
743 if (sdev0)
744 blk_queue_bounce_limit(sdev0->request_queue,
745 pp->adma_dma_mask);
746 if (sdev1)
747 blk_queue_bounce_limit(sdev1->request_queue,
748 pp->adma_dma_mask);
749 }
750
751 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
752 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
753 ata_port_printk(ap, KERN_INFO,
754 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
755 (unsigned long long)*ap->host->dev->dma_mask,
756 segment_boundary, sg_tablesize);
757
758 spin_unlock_irqrestore(ap->lock, flags);
759
760 return rc;
761 }
762
763 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
764 {
765 struct nv_adma_port_priv *pp = qc->ap->private_data;
766 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
767 }
768
769 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
770 {
771 /* Other than when internal or pass-through commands are executed,
772 the only time this function will be called in ADMA mode will be
773 if a command fails. In the failure case we don't care about going
774 into register mode with ADMA commands pending, as the commands will
775 all shortly be aborted anyway. We assume that NCQ commands are not
776 issued via passthrough, which is the only way that switching into
777 ADMA mode could abort outstanding commands. */
778 nv_adma_register_mode(ap);
779
780 ata_sff_tf_read(ap, tf);
781 }
782
783 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
784 {
785 unsigned int idx = 0;
786
787 if (tf->flags & ATA_TFLAG_ISADDR) {
788 if (tf->flags & ATA_TFLAG_LBA48) {
789 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
790 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
791 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
792 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
793 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
794 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
795 } else
796 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
797
798 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
799 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
800 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
801 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
802 }
803
804 if (tf->flags & ATA_TFLAG_DEVICE)
805 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
806
807 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
808
809 while (idx < 12)
810 cpb[idx++] = cpu_to_le16(IGN);
811
812 return idx;
813 }
814
815 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
816 {
817 struct nv_adma_port_priv *pp = ap->private_data;
818 u8 flags = pp->cpb[cpb_num].resp_flags;
819
820 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
821
822 if (unlikely((force_err ||
823 flags & (NV_CPB_RESP_ATA_ERR |
824 NV_CPB_RESP_CMD_ERR |
825 NV_CPB_RESP_CPB_ERR)))) {
826 struct ata_eh_info *ehi = &ap->link.eh_info;
827 int freeze = 0;
828
829 ata_ehi_clear_desc(ehi);
830 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
831 if (flags & NV_CPB_RESP_ATA_ERR) {
832 ata_ehi_push_desc(ehi, "ATA error");
833 ehi->err_mask |= AC_ERR_DEV;
834 } else if (flags & NV_CPB_RESP_CMD_ERR) {
835 ata_ehi_push_desc(ehi, "CMD error");
836 ehi->err_mask |= AC_ERR_DEV;
837 } else if (flags & NV_CPB_RESP_CPB_ERR) {
838 ata_ehi_push_desc(ehi, "CPB error");
839 ehi->err_mask |= AC_ERR_SYSTEM;
840 freeze = 1;
841 } else {
842 /* notifier error, but no error in CPB flags? */
843 ata_ehi_push_desc(ehi, "unknown");
844 ehi->err_mask |= AC_ERR_OTHER;
845 freeze = 1;
846 }
847 /* Kill all commands. EH will determine what actually failed. */
848 if (freeze)
849 ata_port_freeze(ap);
850 else
851 ata_port_abort(ap);
852 return 1;
853 }
854
855 if (likely(flags & NV_CPB_RESP_DONE)) {
856 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
857 VPRINTK("CPB flags done, flags=0x%x\n", flags);
858 if (likely(qc)) {
859 DPRINTK("Completing qc from tag %d\n", cpb_num);
860 ata_qc_complete(qc);
861 } else {
862 struct ata_eh_info *ehi = &ap->link.eh_info;
863 /* Notifier bits set without a command may indicate the drive
864 is misbehaving. Raise host state machine violation on this
865 condition. */
866 ata_port_printk(ap, KERN_ERR,
867 "notifier for tag %d with no cmd?\n",
868 cpb_num);
869 ehi->err_mask |= AC_ERR_HSM;
870 ehi->action |= ATA_EH_RESET;
871 ata_port_freeze(ap);
872 return 1;
873 }
874 }
875 return 0;
876 }
877
878 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
879 {
880 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
881
882 /* freeze if hotplugged */
883 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
884 ata_port_freeze(ap);
885 return 1;
886 }
887
888 /* bail out if not our interrupt */
889 if (!(irq_stat & NV_INT_DEV))
890 return 0;
891
892 /* DEV interrupt w/ no active qc? */
893 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
894 ata_sff_check_status(ap);
895 return 1;
896 }
897
898 /* handle interrupt */
899 return ata_sff_host_intr(ap, qc);
900 }
901
902 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
903 {
904 struct ata_host *host = dev_instance;
905 int i, handled = 0;
906 u32 notifier_clears[2];
907
908 spin_lock(&host->lock);
909
910 for (i = 0; i < host->n_ports; i++) {
911 struct ata_port *ap = host->ports[i];
912 notifier_clears[i] = 0;
913
914 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
915 struct nv_adma_port_priv *pp = ap->private_data;
916 void __iomem *mmio = pp->ctl_block;
917 u16 status;
918 u32 gen_ctl;
919 u32 notifier, notifier_error;
920
921 /* if ADMA is disabled, use standard ata interrupt handler */
922 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
923 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
924 >> (NV_INT_PORT_SHIFT * i);
925 handled += nv_host_intr(ap, irq_stat);
926 continue;
927 }
928
929 /* if in ATA register mode, check for standard interrupts */
930 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
931 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
932 >> (NV_INT_PORT_SHIFT * i);
933 if (ata_tag_valid(ap->link.active_tag))
934 /** NV_INT_DEV indication seems unreliable at times
935 at least in ADMA mode. Force it on always when a
936 command is active, to prevent losing interrupts. */
937 irq_stat |= NV_INT_DEV;
938 handled += nv_host_intr(ap, irq_stat);
939 }
940
941 notifier = readl(mmio + NV_ADMA_NOTIFIER);
942 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
943 notifier_clears[i] = notifier | notifier_error;
944
945 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
946
947 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
948 !notifier_error)
949 /* Nothing to do */
950 continue;
951
952 status = readw(mmio + NV_ADMA_STAT);
953
954 /* Clear status. Ensure the controller sees the clearing before we start
955 looking at any of the CPB statuses, so that any CPB completions after
956 this point in the handler will raise another interrupt. */
957 writew(status, mmio + NV_ADMA_STAT);
958 readw(mmio + NV_ADMA_STAT); /* flush posted write */
959 rmb();
960
961 handled++; /* irq handled if we got here */
962
963 /* freeze if hotplugged or controller error */
964 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
965 NV_ADMA_STAT_HOTUNPLUG |
966 NV_ADMA_STAT_TIMEOUT |
967 NV_ADMA_STAT_SERROR))) {
968 struct ata_eh_info *ehi = &ap->link.eh_info;
969
970 ata_ehi_clear_desc(ehi);
971 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
972 if (status & NV_ADMA_STAT_TIMEOUT) {
973 ehi->err_mask |= AC_ERR_SYSTEM;
974 ata_ehi_push_desc(ehi, "timeout");
975 } else if (status & NV_ADMA_STAT_HOTPLUG) {
976 ata_ehi_hotplugged(ehi);
977 ata_ehi_push_desc(ehi, "hotplug");
978 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
979 ata_ehi_hotplugged(ehi);
980 ata_ehi_push_desc(ehi, "hot unplug");
981 } else if (status & NV_ADMA_STAT_SERROR) {
982 /* let libata analyze SError and figure out the cause */
983 ata_ehi_push_desc(ehi, "SError");
984 } else
985 ata_ehi_push_desc(ehi, "unknown");
986 ata_port_freeze(ap);
987 continue;
988 }
989
990 if (status & (NV_ADMA_STAT_DONE |
991 NV_ADMA_STAT_CPBERR |
992 NV_ADMA_STAT_CMD_COMPLETE)) {
993 u32 check_commands = notifier_clears[i];
994 int pos, error = 0;
995
996 if (status & NV_ADMA_STAT_CPBERR) {
997 /* Check all active commands */
998 if (ata_tag_valid(ap->link.active_tag))
999 check_commands = 1 <<
1000 ap->link.active_tag;
1001 else
1002 check_commands = ap->
1003 link.sactive;
1004 }
1005
1006 /** Check CPBs for completed commands */
1007 while ((pos = ffs(check_commands)) && !error) {
1008 pos--;
1009 error = nv_adma_check_cpb(ap, pos,
1010 notifier_error & (1 << pos));
1011 check_commands &= ~(1 << pos);
1012 }
1013 }
1014 }
1015 }
1016
1017 if (notifier_clears[0] || notifier_clears[1]) {
1018 /* Note: Both notifier clear registers must be written
1019 if either is set, even if one is zero, according to NVIDIA. */
1020 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1021 writel(notifier_clears[0], pp->notifier_clear_block);
1022 pp = host->ports[1]->private_data;
1023 writel(notifier_clears[1], pp->notifier_clear_block);
1024 }
1025
1026 spin_unlock(&host->lock);
1027
1028 return IRQ_RETVAL(handled);
1029 }
1030
1031 static void nv_adma_freeze(struct ata_port *ap)
1032 {
1033 struct nv_adma_port_priv *pp = ap->private_data;
1034 void __iomem *mmio = pp->ctl_block;
1035 u16 tmp;
1036
1037 nv_ck804_freeze(ap);
1038
1039 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1040 return;
1041
1042 /* clear any outstanding CK804 notifications */
1043 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1044 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1045
1046 /* Disable interrupt */
1047 tmp = readw(mmio + NV_ADMA_CTL);
1048 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1049 mmio + NV_ADMA_CTL);
1050 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1051 }
1052
1053 static void nv_adma_thaw(struct ata_port *ap)
1054 {
1055 struct nv_adma_port_priv *pp = ap->private_data;
1056 void __iomem *mmio = pp->ctl_block;
1057 u16 tmp;
1058
1059 nv_ck804_thaw(ap);
1060
1061 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1062 return;
1063
1064 /* Enable interrupt */
1065 tmp = readw(mmio + NV_ADMA_CTL);
1066 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1067 mmio + NV_ADMA_CTL);
1068 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1069 }
1070
1071 static void nv_adma_irq_clear(struct ata_port *ap)
1072 {
1073 struct nv_adma_port_priv *pp = ap->private_data;
1074 void __iomem *mmio = pp->ctl_block;
1075 u32 notifier_clears[2];
1076
1077 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1078 ata_sff_irq_clear(ap);
1079 return;
1080 }
1081
1082 /* clear any outstanding CK804 notifications */
1083 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1084 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1085
1086 /* clear ADMA status */
1087 writew(0xffff, mmio + NV_ADMA_STAT);
1088
1089 /* clear notifiers - note both ports need to be written with
1090 something even though we are only clearing on one */
1091 if (ap->port_no == 0) {
1092 notifier_clears[0] = 0xFFFFFFFF;
1093 notifier_clears[1] = 0;
1094 } else {
1095 notifier_clears[0] = 0;
1096 notifier_clears[1] = 0xFFFFFFFF;
1097 }
1098 pp = ap->host->ports[0]->private_data;
1099 writel(notifier_clears[0], pp->notifier_clear_block);
1100 pp = ap->host->ports[1]->private_data;
1101 writel(notifier_clears[1], pp->notifier_clear_block);
1102 }
1103
1104 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1105 {
1106 struct nv_adma_port_priv *pp = qc->ap->private_data;
1107
1108 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1109 ata_sff_post_internal_cmd(qc);
1110 }
1111
1112 static int nv_adma_port_start(struct ata_port *ap)
1113 {
1114 struct device *dev = ap->host->dev;
1115 struct nv_adma_port_priv *pp;
1116 int rc;
1117 void *mem;
1118 dma_addr_t mem_dma;
1119 void __iomem *mmio;
1120 struct pci_dev *pdev = to_pci_dev(dev);
1121 u16 tmp;
1122
1123 VPRINTK("ENTER\n");
1124
1125 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1126 pad buffers */
1127 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1128 if (rc)
1129 return rc;
1130 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1131 if (rc)
1132 return rc;
1133
1134 rc = ata_port_start(ap);
1135 if (rc)
1136 return rc;
1137
1138 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1139 if (!pp)
1140 return -ENOMEM;
1141
1142 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1143 ap->port_no * NV_ADMA_PORT_SIZE;
1144 pp->ctl_block = mmio;
1145 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1146 pp->notifier_clear_block = pp->gen_block +
1147 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1148
1149 /* Now that the legacy PRD and padding buffer are allocated we can
1150 safely raise the DMA mask to allocate the CPB/APRD table.
1151 These are allowed to fail since we store the value that ends up
1152 being used to set as the bounce limit in slave_config later if
1153 needed. */
1154 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1155 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1156 pp->adma_dma_mask = *dev->dma_mask;
1157
1158 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1159 &mem_dma, GFP_KERNEL);
1160 if (!mem)
1161 return -ENOMEM;
1162 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1163
1164 /*
1165 * First item in chunk of DMA memory:
1166 * 128-byte command parameter block (CPB)
1167 * one for each command tag
1168 */
1169 pp->cpb = mem;
1170 pp->cpb_dma = mem_dma;
1171
1172 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1173 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1174
1175 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1176 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1177
1178 /*
1179 * Second item: block of ADMA_SGTBL_LEN s/g entries
1180 */
1181 pp->aprd = mem;
1182 pp->aprd_dma = mem_dma;
1183
1184 ap->private_data = pp;
1185
1186 /* clear any outstanding interrupt conditions */
1187 writew(0xffff, mmio + NV_ADMA_STAT);
1188
1189 /* initialize port variables */
1190 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1191
1192 /* clear CPB fetch count */
1193 writew(0, mmio + NV_ADMA_CPB_COUNT);
1194
1195 /* clear GO for register mode, enable interrupt */
1196 tmp = readw(mmio + NV_ADMA_CTL);
1197 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1198 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1199
1200 tmp = readw(mmio + NV_ADMA_CTL);
1201 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1202 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1203 udelay(1);
1204 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1205 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1206
1207 return 0;
1208 }
1209
1210 static void nv_adma_port_stop(struct ata_port *ap)
1211 {
1212 struct nv_adma_port_priv *pp = ap->private_data;
1213 void __iomem *mmio = pp->ctl_block;
1214
1215 VPRINTK("ENTER\n");
1216 writew(0, mmio + NV_ADMA_CTL);
1217 }
1218
1219 #ifdef CONFIG_PM
1220 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1221 {
1222 struct nv_adma_port_priv *pp = ap->private_data;
1223 void __iomem *mmio = pp->ctl_block;
1224
1225 /* Go to register mode - clears GO */
1226 nv_adma_register_mode(ap);
1227
1228 /* clear CPB fetch count */
1229 writew(0, mmio + NV_ADMA_CPB_COUNT);
1230
1231 /* disable interrupt, shut down port */
1232 writew(0, mmio + NV_ADMA_CTL);
1233
1234 return 0;
1235 }
1236
1237 static int nv_adma_port_resume(struct ata_port *ap)
1238 {
1239 struct nv_adma_port_priv *pp = ap->private_data;
1240 void __iomem *mmio = pp->ctl_block;
1241 u16 tmp;
1242
1243 /* set CPB block location */
1244 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1245 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1246
1247 /* clear any outstanding interrupt conditions */
1248 writew(0xffff, mmio + NV_ADMA_STAT);
1249
1250 /* initialize port variables */
1251 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1252
1253 /* clear CPB fetch count */
1254 writew(0, mmio + NV_ADMA_CPB_COUNT);
1255
1256 /* clear GO for register mode, enable interrupt */
1257 tmp = readw(mmio + NV_ADMA_CTL);
1258 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1259 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1260
1261 tmp = readw(mmio + NV_ADMA_CTL);
1262 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1263 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1264 udelay(1);
1265 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1266 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1267
1268 return 0;
1269 }
1270 #endif
1271
1272 static void nv_adma_setup_port(struct ata_port *ap)
1273 {
1274 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1275 struct ata_ioports *ioport = &ap->ioaddr;
1276
1277 VPRINTK("ENTER\n");
1278
1279 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1280
1281 ioport->cmd_addr = mmio;
1282 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1283 ioport->error_addr =
1284 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1285 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1286 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1287 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1288 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1289 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1290 ioport->status_addr =
1291 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1292 ioport->altstatus_addr =
1293 ioport->ctl_addr = mmio + 0x20;
1294 }
1295
1296 static int nv_adma_host_init(struct ata_host *host)
1297 {
1298 struct pci_dev *pdev = to_pci_dev(host->dev);
1299 unsigned int i;
1300 u32 tmp32;
1301
1302 VPRINTK("ENTER\n");
1303
1304 /* enable ADMA on the ports */
1305 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1306 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1307 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1308 NV_MCP_SATA_CFG_20_PORT1_EN |
1309 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1310
1311 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1312
1313 for (i = 0; i < host->n_ports; i++)
1314 nv_adma_setup_port(host->ports[i]);
1315
1316 return 0;
1317 }
1318
1319 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1320 struct scatterlist *sg,
1321 int idx,
1322 struct nv_adma_prd *aprd)
1323 {
1324 u8 flags = 0;
1325 if (qc->tf.flags & ATA_TFLAG_WRITE)
1326 flags |= NV_APRD_WRITE;
1327 if (idx == qc->n_elem - 1)
1328 flags |= NV_APRD_END;
1329 else if (idx != 4)
1330 flags |= NV_APRD_CONT;
1331
1332 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1333 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1334 aprd->flags = flags;
1335 aprd->packet_len = 0;
1336 }
1337
1338 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1339 {
1340 struct nv_adma_port_priv *pp = qc->ap->private_data;
1341 struct nv_adma_prd *aprd;
1342 struct scatterlist *sg;
1343 unsigned int si;
1344
1345 VPRINTK("ENTER\n");
1346
1347 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1348 aprd = (si < 5) ? &cpb->aprd[si] :
1349 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1350 nv_adma_fill_aprd(qc, sg, si, aprd);
1351 }
1352 if (si > 5)
1353 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1354 else
1355 cpb->next_aprd = cpu_to_le64(0);
1356 }
1357
1358 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1359 {
1360 struct nv_adma_port_priv *pp = qc->ap->private_data;
1361
1362 /* ADMA engine can only be used for non-ATAPI DMA commands,
1363 or interrupt-driven no-data commands. */
1364 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1365 (qc->tf.flags & ATA_TFLAG_POLLING))
1366 return 1;
1367
1368 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1369 (qc->tf.protocol == ATA_PROT_NODATA))
1370 return 0;
1371
1372 return 1;
1373 }
1374
1375 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1376 {
1377 struct nv_adma_port_priv *pp = qc->ap->private_data;
1378 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1379 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1380 NV_CPB_CTL_IEN;
1381
1382 if (nv_adma_use_reg_mode(qc)) {
1383 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1384 (qc->flags & ATA_QCFLAG_DMAMAP));
1385 nv_adma_register_mode(qc->ap);
1386 ata_sff_qc_prep(qc);
1387 return;
1388 }
1389
1390 cpb->resp_flags = NV_CPB_RESP_DONE;
1391 wmb();
1392 cpb->ctl_flags = 0;
1393 wmb();
1394
1395 cpb->len = 3;
1396 cpb->tag = qc->tag;
1397 cpb->next_cpb_idx = 0;
1398
1399 /* turn on NCQ flags for NCQ commands */
1400 if (qc->tf.protocol == ATA_PROT_NCQ)
1401 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1402
1403 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1404
1405 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1406
1407 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1408 nv_adma_fill_sg(qc, cpb);
1409 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1410 } else
1411 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1412
1413 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1414 until we are finished filling in all of the contents */
1415 wmb();
1416 cpb->ctl_flags = ctl_flags;
1417 wmb();
1418 cpb->resp_flags = 0;
1419 }
1420
1421 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1422 {
1423 struct nv_adma_port_priv *pp = qc->ap->private_data;
1424 void __iomem *mmio = pp->ctl_block;
1425 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1426
1427 VPRINTK("ENTER\n");
1428
1429 /* We can't handle result taskfile with NCQ commands, since
1430 retrieving the taskfile switches us out of ADMA mode and would abort
1431 existing commands. */
1432 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1433 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1434 ata_dev_printk(qc->dev, KERN_ERR,
1435 "NCQ w/ RESULT_TF not allowed\n");
1436 return AC_ERR_SYSTEM;
1437 }
1438
1439 if (nv_adma_use_reg_mode(qc)) {
1440 /* use ATA register mode */
1441 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1442 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1443 (qc->flags & ATA_QCFLAG_DMAMAP));
1444 nv_adma_register_mode(qc->ap);
1445 return ata_sff_qc_issue(qc);
1446 } else
1447 nv_adma_mode(qc->ap);
1448
1449 /* write append register, command tag in lower 8 bits
1450 and (number of cpbs to append -1) in top 8 bits */
1451 wmb();
1452
1453 if (curr_ncq != pp->last_issue_ncq) {
1454 /* Seems to need some delay before switching between NCQ and
1455 non-NCQ commands, else we get command timeouts and such. */
1456 udelay(20);
1457 pp->last_issue_ncq = curr_ncq;
1458 }
1459
1460 writew(qc->tag, mmio + NV_ADMA_APPEND);
1461
1462 DPRINTK("Issued tag %u\n", qc->tag);
1463
1464 return 0;
1465 }
1466
1467 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1468 {
1469 struct ata_host *host = dev_instance;
1470 unsigned int i;
1471 unsigned int handled = 0;
1472 unsigned long flags;
1473
1474 spin_lock_irqsave(&host->lock, flags);
1475
1476 for (i = 0; i < host->n_ports; i++) {
1477 struct ata_port *ap;
1478
1479 ap = host->ports[i];
1480 if (ap &&
1481 !(ap->flags & ATA_FLAG_DISABLED)) {
1482 struct ata_queued_cmd *qc;
1483
1484 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1485 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1486 handled += ata_sff_host_intr(ap, qc);
1487 else
1488 // No request pending? Clear interrupt status
1489 // anyway, in case there's one pending.
1490 ap->ops->sff_check_status(ap);
1491 }
1492
1493 }
1494
1495 spin_unlock_irqrestore(&host->lock, flags);
1496
1497 return IRQ_RETVAL(handled);
1498 }
1499
1500 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1501 {
1502 int i, handled = 0;
1503
1504 for (i = 0; i < host->n_ports; i++) {
1505 struct ata_port *ap = host->ports[i];
1506
1507 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1508 handled += nv_host_intr(ap, irq_stat);
1509
1510 irq_stat >>= NV_INT_PORT_SHIFT;
1511 }
1512
1513 return IRQ_RETVAL(handled);
1514 }
1515
1516 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1517 {
1518 struct ata_host *host = dev_instance;
1519 u8 irq_stat;
1520 irqreturn_t ret;
1521
1522 spin_lock(&host->lock);
1523 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1524 ret = nv_do_interrupt(host, irq_stat);
1525 spin_unlock(&host->lock);
1526
1527 return ret;
1528 }
1529
1530 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1531 {
1532 struct ata_host *host = dev_instance;
1533 u8 irq_stat;
1534 irqreturn_t ret;
1535
1536 spin_lock(&host->lock);
1537 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1538 ret = nv_do_interrupt(host, irq_stat);
1539 spin_unlock(&host->lock);
1540
1541 return ret;
1542 }
1543
1544 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1545 {
1546 if (sc_reg > SCR_CONTROL)
1547 return -EINVAL;
1548
1549 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1550 return 0;
1551 }
1552
1553 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1554 {
1555 if (sc_reg > SCR_CONTROL)
1556 return -EINVAL;
1557
1558 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1559 return 0;
1560 }
1561
1562 static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
1563 unsigned long deadline)
1564 {
1565 bool online;
1566 int rc;
1567
1568 rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1569 &online, NULL);
1570 return online ? -EAGAIN : rc;
1571 }
1572
1573 static void nv_nf2_freeze(struct ata_port *ap)
1574 {
1575 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1576 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1577 u8 mask;
1578
1579 mask = ioread8(scr_addr + NV_INT_ENABLE);
1580 mask &= ~(NV_INT_ALL << shift);
1581 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1582 }
1583
1584 static void nv_nf2_thaw(struct ata_port *ap)
1585 {
1586 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1587 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588 u8 mask;
1589
1590 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1591
1592 mask = ioread8(scr_addr + NV_INT_ENABLE);
1593 mask |= (NV_INT_MASK << shift);
1594 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1595 }
1596
1597 static void nv_ck804_freeze(struct ata_port *ap)
1598 {
1599 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1600 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1601 u8 mask;
1602
1603 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1604 mask &= ~(NV_INT_ALL << shift);
1605 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1606 }
1607
1608 static void nv_ck804_thaw(struct ata_port *ap)
1609 {
1610 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1611 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1612 u8 mask;
1613
1614 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1615
1616 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1617 mask |= (NV_INT_MASK << shift);
1618 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1619 }
1620
1621 static void nv_mcp55_freeze(struct ata_port *ap)
1622 {
1623 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1624 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1625 u32 mask;
1626
1627 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1628
1629 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1630 mask &= ~(NV_INT_ALL_MCP55 << shift);
1631 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1632 ata_sff_freeze(ap);
1633 }
1634
1635 static void nv_mcp55_thaw(struct ata_port *ap)
1636 {
1637 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1638 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1639 u32 mask;
1640
1641 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1642
1643 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1644 mask |= (NV_INT_MASK_MCP55 << shift);
1645 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1646 ata_sff_thaw(ap);
1647 }
1648
1649 static void nv_adma_error_handler(struct ata_port *ap)
1650 {
1651 struct nv_adma_port_priv *pp = ap->private_data;
1652 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1653 void __iomem *mmio = pp->ctl_block;
1654 int i;
1655 u16 tmp;
1656
1657 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1658 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1659 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1660 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1661 u32 status = readw(mmio + NV_ADMA_STAT);
1662 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1663 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1664
1665 ata_port_printk(ap, KERN_ERR,
1666 "EH in ADMA mode, notifier 0x%X "
1667 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1668 "next cpb count 0x%X next cpb idx 0x%x\n",
1669 notifier, notifier_error, gen_ctl, status,
1670 cpb_count, next_cpb_idx);
1671
1672 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1673 struct nv_adma_cpb *cpb = &pp->cpb[i];
1674 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1675 ap->link.sactive & (1 << i))
1676 ata_port_printk(ap, KERN_ERR,
1677 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1678 i, cpb->ctl_flags, cpb->resp_flags);
1679 }
1680 }
1681
1682 /* Push us back into port register mode for error handling. */
1683 nv_adma_register_mode(ap);
1684
1685 /* Mark all of the CPBs as invalid to prevent them from
1686 being executed */
1687 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1688 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1689
1690 /* clear CPB fetch count */
1691 writew(0, mmio + NV_ADMA_CPB_COUNT);
1692
1693 /* Reset channel */
1694 tmp = readw(mmio + NV_ADMA_CTL);
1695 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1696 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1697 udelay(1);
1698 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1699 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1700 }
1701
1702 ata_sff_error_handler(ap);
1703 }
1704
1705 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1706 {
1707 struct nv_swncq_port_priv *pp = ap->private_data;
1708 struct defer_queue *dq = &pp->defer_queue;
1709
1710 /* queue is full */
1711 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1712 dq->defer_bits |= (1 << qc->tag);
1713 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1714 }
1715
1716 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1717 {
1718 struct nv_swncq_port_priv *pp = ap->private_data;
1719 struct defer_queue *dq = &pp->defer_queue;
1720 unsigned int tag;
1721
1722 if (dq->head == dq->tail) /* null queue */
1723 return NULL;
1724
1725 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1726 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1727 WARN_ON(!(dq->defer_bits & (1 << tag)));
1728 dq->defer_bits &= ~(1 << tag);
1729
1730 return ata_qc_from_tag(ap, tag);
1731 }
1732
1733 static void nv_swncq_fis_reinit(struct ata_port *ap)
1734 {
1735 struct nv_swncq_port_priv *pp = ap->private_data;
1736
1737 pp->dhfis_bits = 0;
1738 pp->dmafis_bits = 0;
1739 pp->sdbfis_bits = 0;
1740 pp->ncq_flags = 0;
1741 }
1742
1743 static void nv_swncq_pp_reinit(struct ata_port *ap)
1744 {
1745 struct nv_swncq_port_priv *pp = ap->private_data;
1746 struct defer_queue *dq = &pp->defer_queue;
1747
1748 dq->head = 0;
1749 dq->tail = 0;
1750 dq->defer_bits = 0;
1751 pp->qc_active = 0;
1752 pp->last_issue_tag = ATA_TAG_POISON;
1753 nv_swncq_fis_reinit(ap);
1754 }
1755
1756 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1757 {
1758 struct nv_swncq_port_priv *pp = ap->private_data;
1759
1760 writew(fis, pp->irq_block);
1761 }
1762
1763 static void __ata_bmdma_stop(struct ata_port *ap)
1764 {
1765 struct ata_queued_cmd qc;
1766
1767 qc.ap = ap;
1768 ata_bmdma_stop(&qc);
1769 }
1770
1771 static void nv_swncq_ncq_stop(struct ata_port *ap)
1772 {
1773 struct nv_swncq_port_priv *pp = ap->private_data;
1774 unsigned int i;
1775 u32 sactive;
1776 u32 done_mask;
1777
1778 ata_port_printk(ap, KERN_ERR,
1779 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1780 ap->qc_active, ap->link.sactive);
1781 ata_port_printk(ap, KERN_ERR,
1782 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1783 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1784 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1785 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1786
1787 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1788 ap->ops->sff_check_status(ap),
1789 ioread8(ap->ioaddr.error_addr));
1790
1791 sactive = readl(pp->sactive_block);
1792 done_mask = pp->qc_active ^ sactive;
1793
1794 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1795 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1796 u8 err = 0;
1797 if (pp->qc_active & (1 << i))
1798 err = 0;
1799 else if (done_mask & (1 << i))
1800 err = 1;
1801 else
1802 continue;
1803
1804 ata_port_printk(ap, KERN_ERR,
1805 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1806 (pp->dhfis_bits >> i) & 0x1,
1807 (pp->dmafis_bits >> i) & 0x1,
1808 (pp->sdbfis_bits >> i) & 0x1,
1809 (sactive >> i) & 0x1,
1810 (err ? "error! tag doesn't exit" : " "));
1811 }
1812
1813 nv_swncq_pp_reinit(ap);
1814 ap->ops->sff_irq_clear(ap);
1815 __ata_bmdma_stop(ap);
1816 nv_swncq_irq_clear(ap, 0xffff);
1817 }
1818
1819 static void nv_swncq_error_handler(struct ata_port *ap)
1820 {
1821 struct ata_eh_context *ehc = &ap->link.eh_context;
1822
1823 if (ap->link.sactive) {
1824 nv_swncq_ncq_stop(ap);
1825 ehc->i.action |= ATA_EH_RESET;
1826 }
1827
1828 ata_sff_error_handler(ap);
1829 }
1830
1831 #ifdef CONFIG_PM
1832 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1833 {
1834 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1835 u32 tmp;
1836
1837 /* clear irq */
1838 writel(~0, mmio + NV_INT_STATUS_MCP55);
1839
1840 /* disable irq */
1841 writel(0, mmio + NV_INT_ENABLE_MCP55);
1842
1843 /* disable swncq */
1844 tmp = readl(mmio + NV_CTL_MCP55);
1845 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1846 writel(tmp, mmio + NV_CTL_MCP55);
1847
1848 return 0;
1849 }
1850
1851 static int nv_swncq_port_resume(struct ata_port *ap)
1852 {
1853 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1854 u32 tmp;
1855
1856 /* clear irq */
1857 writel(~0, mmio + NV_INT_STATUS_MCP55);
1858
1859 /* enable irq */
1860 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1861
1862 /* enable swncq */
1863 tmp = readl(mmio + NV_CTL_MCP55);
1864 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1865
1866 return 0;
1867 }
1868 #endif
1869
1870 static void nv_swncq_host_init(struct ata_host *host)
1871 {
1872 u32 tmp;
1873 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1874 struct pci_dev *pdev = to_pci_dev(host->dev);
1875 u8 regval;
1876
1877 /* disable ECO 398 */
1878 pci_read_config_byte(pdev, 0x7f, &regval);
1879 regval &= ~(1 << 7);
1880 pci_write_config_byte(pdev, 0x7f, regval);
1881
1882 /* enable swncq */
1883 tmp = readl(mmio + NV_CTL_MCP55);
1884 VPRINTK("HOST_CTL:0x%X\n", tmp);
1885 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1886
1887 /* enable irq intr */
1888 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1889 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1890 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1891
1892 /* clear port irq */
1893 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1894 }
1895
1896 static int nv_swncq_slave_config(struct scsi_device *sdev)
1897 {
1898 struct ata_port *ap = ata_shost_to_port(sdev->host);
1899 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1900 struct ata_device *dev;
1901 int rc;
1902 u8 rev;
1903 u8 check_maxtor = 0;
1904 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1905
1906 rc = ata_scsi_slave_config(sdev);
1907 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1908 /* Not a proper libata device, ignore */
1909 return rc;
1910
1911 dev = &ap->link.device[sdev->id];
1912 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1913 return rc;
1914
1915 /* if MCP51 and Maxtor, then disable ncq */
1916 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1917 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1918 check_maxtor = 1;
1919
1920 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1921 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1922 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1923 pci_read_config_byte(pdev, 0x8, &rev);
1924 if (rev <= 0xa2)
1925 check_maxtor = 1;
1926 }
1927
1928 if (!check_maxtor)
1929 return rc;
1930
1931 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1932
1933 if (strncmp(model_num, "Maxtor", 6) == 0) {
1934 ata_scsi_change_queue_depth(sdev, 1);
1935 ata_dev_printk(dev, KERN_NOTICE,
1936 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1937 }
1938
1939 return rc;
1940 }
1941
1942 static int nv_swncq_port_start(struct ata_port *ap)
1943 {
1944 struct device *dev = ap->host->dev;
1945 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1946 struct nv_swncq_port_priv *pp;
1947 int rc;
1948
1949 rc = ata_port_start(ap);
1950 if (rc)
1951 return rc;
1952
1953 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1954 if (!pp)
1955 return -ENOMEM;
1956
1957 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1958 &pp->prd_dma, GFP_KERNEL);
1959 if (!pp->prd)
1960 return -ENOMEM;
1961 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1962
1963 ap->private_data = pp;
1964 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1965 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1966 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1967
1968 return 0;
1969 }
1970
1971 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1972 {
1973 if (qc->tf.protocol != ATA_PROT_NCQ) {
1974 ata_sff_qc_prep(qc);
1975 return;
1976 }
1977
1978 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1979 return;
1980
1981 nv_swncq_fill_sg(qc);
1982 }
1983
1984 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1985 {
1986 struct ata_port *ap = qc->ap;
1987 struct scatterlist *sg;
1988 struct nv_swncq_port_priv *pp = ap->private_data;
1989 struct ata_prd *prd;
1990 unsigned int si, idx;
1991
1992 prd = pp->prd + ATA_MAX_PRD * qc->tag;
1993
1994 idx = 0;
1995 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1996 u32 addr, offset;
1997 u32 sg_len, len;
1998
1999 addr = (u32)sg_dma_address(sg);
2000 sg_len = sg_dma_len(sg);
2001
2002 while (sg_len) {
2003 offset = addr & 0xffff;
2004 len = sg_len;
2005 if ((offset + sg_len) > 0x10000)
2006 len = 0x10000 - offset;
2007
2008 prd[idx].addr = cpu_to_le32(addr);
2009 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2010
2011 idx++;
2012 sg_len -= len;
2013 addr += len;
2014 }
2015 }
2016
2017 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2018 }
2019
2020 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2021 struct ata_queued_cmd *qc)
2022 {
2023 struct nv_swncq_port_priv *pp = ap->private_data;
2024
2025 if (qc == NULL)
2026 return 0;
2027
2028 DPRINTK("Enter\n");
2029
2030 writel((1 << qc->tag), pp->sactive_block);
2031 pp->last_issue_tag = qc->tag;
2032 pp->dhfis_bits &= ~(1 << qc->tag);
2033 pp->dmafis_bits &= ~(1 << qc->tag);
2034 pp->qc_active |= (0x1 << qc->tag);
2035
2036 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2037 ap->ops->sff_exec_command(ap, &qc->tf);
2038
2039 DPRINTK("Issued tag %u\n", qc->tag);
2040
2041 return 0;
2042 }
2043
2044 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2045 {
2046 struct ata_port *ap = qc->ap;
2047 struct nv_swncq_port_priv *pp = ap->private_data;
2048
2049 if (qc->tf.protocol != ATA_PROT_NCQ)
2050 return ata_sff_qc_issue(qc);
2051
2052 DPRINTK("Enter\n");
2053
2054 if (!pp->qc_active)
2055 nv_swncq_issue_atacmd(ap, qc);
2056 else
2057 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2058
2059 return 0;
2060 }
2061
2062 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2063 {
2064 u32 serror;
2065 struct ata_eh_info *ehi = &ap->link.eh_info;
2066
2067 ata_ehi_clear_desc(ehi);
2068
2069 /* AHCI needs SError cleared; otherwise, it might lock up */
2070 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2071 sata_scr_write(&ap->link, SCR_ERROR, serror);
2072
2073 /* analyze @irq_stat */
2074 if (fis & NV_SWNCQ_IRQ_ADDED)
2075 ata_ehi_push_desc(ehi, "hot plug");
2076 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2077 ata_ehi_push_desc(ehi, "hot unplug");
2078
2079 ata_ehi_hotplugged(ehi);
2080
2081 /* okay, let's hand over to EH */
2082 ehi->serror |= serror;
2083
2084 ata_port_freeze(ap);
2085 }
2086
2087 static int nv_swncq_sdbfis(struct ata_port *ap)
2088 {
2089 struct ata_queued_cmd *qc;
2090 struct nv_swncq_port_priv *pp = ap->private_data;
2091 struct ata_eh_info *ehi = &ap->link.eh_info;
2092 u32 sactive;
2093 int nr_done = 0;
2094 u32 done_mask;
2095 int i;
2096 u8 host_stat;
2097 u8 lack_dhfis = 0;
2098
2099 host_stat = ap->ops->bmdma_status(ap);
2100 if (unlikely(host_stat & ATA_DMA_ERR)) {
2101 /* error when transfering data to/from memory */
2102 ata_ehi_clear_desc(ehi);
2103 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2104 ehi->err_mask |= AC_ERR_HOST_BUS;
2105 ehi->action |= ATA_EH_RESET;
2106 return -EINVAL;
2107 }
2108
2109 ap->ops->sff_irq_clear(ap);
2110 __ata_bmdma_stop(ap);
2111
2112 sactive = readl(pp->sactive_block);
2113 done_mask = pp->qc_active ^ sactive;
2114
2115 if (unlikely(done_mask & sactive)) {
2116 ata_ehi_clear_desc(ehi);
2117 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
2118 "(%08x->%08x)", pp->qc_active, sactive);
2119 ehi->err_mask |= AC_ERR_HSM;
2120 ehi->action |= ATA_EH_RESET;
2121 return -EINVAL;
2122 }
2123 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2124 if (!(done_mask & (1 << i)))
2125 continue;
2126
2127 qc = ata_qc_from_tag(ap, i);
2128 if (qc) {
2129 ata_qc_complete(qc);
2130 pp->qc_active &= ~(1 << i);
2131 pp->dhfis_bits &= ~(1 << i);
2132 pp->dmafis_bits &= ~(1 << i);
2133 pp->sdbfis_bits |= (1 << i);
2134 nr_done++;
2135 }
2136 }
2137
2138 if (!ap->qc_active) {
2139 DPRINTK("over\n");
2140 nv_swncq_pp_reinit(ap);
2141 return nr_done;
2142 }
2143
2144 if (pp->qc_active & pp->dhfis_bits)
2145 return nr_done;
2146
2147 if ((pp->ncq_flags & ncq_saw_backout) ||
2148 (pp->qc_active ^ pp->dhfis_bits))
2149 /* if the controller cann't get a device to host register FIS,
2150 * The driver needs to reissue the new command.
2151 */
2152 lack_dhfis = 1;
2153
2154 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2155 "SWNCQ:qc_active 0x%X defer_bits %X "
2156 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2157 ap->print_id, ap->qc_active, pp->qc_active,
2158 pp->defer_queue.defer_bits, pp->dhfis_bits,
2159 pp->dmafis_bits, pp->last_issue_tag);
2160
2161 nv_swncq_fis_reinit(ap);
2162
2163 if (lack_dhfis) {
2164 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2165 nv_swncq_issue_atacmd(ap, qc);
2166 return nr_done;
2167 }
2168
2169 if (pp->defer_queue.defer_bits) {
2170 /* send deferral queue command */
2171 qc = nv_swncq_qc_from_dq(ap);
2172 WARN_ON(qc == NULL);
2173 nv_swncq_issue_atacmd(ap, qc);
2174 }
2175
2176 return nr_done;
2177 }
2178
2179 static inline u32 nv_swncq_tag(struct ata_port *ap)
2180 {
2181 struct nv_swncq_port_priv *pp = ap->private_data;
2182 u32 tag;
2183
2184 tag = readb(pp->tag_block) >> 2;
2185 return (tag & 0x1f);
2186 }
2187
2188 static int nv_swncq_dmafis(struct ata_port *ap)
2189 {
2190 struct ata_queued_cmd *qc;
2191 unsigned int rw;
2192 u8 dmactl;
2193 u32 tag;
2194 struct nv_swncq_port_priv *pp = ap->private_data;
2195
2196 __ata_bmdma_stop(ap);
2197 tag = nv_swncq_tag(ap);
2198
2199 DPRINTK("dma setup tag 0x%x\n", tag);
2200 qc = ata_qc_from_tag(ap, tag);
2201
2202 if (unlikely(!qc))
2203 return 0;
2204
2205 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2206
2207 /* load PRD table addr. */
2208 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2209 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2210
2211 /* specify data direction, triple-check start bit is clear */
2212 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2213 dmactl &= ~ATA_DMA_WR;
2214 if (!rw)
2215 dmactl |= ATA_DMA_WR;
2216
2217 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2218
2219 return 1;
2220 }
2221
2222 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2223 {
2224 struct nv_swncq_port_priv *pp = ap->private_data;
2225 struct ata_queued_cmd *qc;
2226 struct ata_eh_info *ehi = &ap->link.eh_info;
2227 u32 serror;
2228 u8 ata_stat;
2229 int rc = 0;
2230
2231 ata_stat = ap->ops->sff_check_status(ap);
2232 nv_swncq_irq_clear(ap, fis);
2233 if (!fis)
2234 return;
2235
2236 if (ap->pflags & ATA_PFLAG_FROZEN)
2237 return;
2238
2239 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2240 nv_swncq_hotplug(ap, fis);
2241 return;
2242 }
2243
2244 if (!pp->qc_active)
2245 return;
2246
2247 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2248 return;
2249 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2250
2251 if (ata_stat & ATA_ERR) {
2252 ata_ehi_clear_desc(ehi);
2253 ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2254 ehi->err_mask |= AC_ERR_DEV;
2255 ehi->serror |= serror;
2256 ehi->action |= ATA_EH_RESET;
2257 ata_port_freeze(ap);
2258 return;
2259 }
2260
2261 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2262 /* If the IRQ is backout, driver must issue
2263 * the new command again some time later.
2264 */
2265 pp->ncq_flags |= ncq_saw_backout;
2266 }
2267
2268 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2269 pp->ncq_flags |= ncq_saw_sdb;
2270 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2271 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2272 ap->print_id, pp->qc_active, pp->dhfis_bits,
2273 pp->dmafis_bits, readl(pp->sactive_block));
2274 rc = nv_swncq_sdbfis(ap);
2275 if (rc < 0)
2276 goto irq_error;
2277 }
2278
2279 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2280 /* The interrupt indicates the new command
2281 * was transmitted correctly to the drive.
2282 */
2283 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2284 pp->ncq_flags |= ncq_saw_d2h;
2285 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2286 ata_ehi_push_desc(ehi, "illegal fis transaction");
2287 ehi->err_mask |= AC_ERR_HSM;
2288 ehi->action |= ATA_EH_RESET;
2289 goto irq_error;
2290 }
2291
2292 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2293 !(pp->ncq_flags & ncq_saw_dmas)) {
2294 ata_stat = ap->ops->sff_check_status(ap);
2295 if (ata_stat & ATA_BUSY)
2296 goto irq_exit;
2297
2298 if (pp->defer_queue.defer_bits) {
2299 DPRINTK("send next command\n");
2300 qc = nv_swncq_qc_from_dq(ap);
2301 nv_swncq_issue_atacmd(ap, qc);
2302 }
2303 }
2304 }
2305
2306 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2307 /* program the dma controller with appropriate PRD buffers
2308 * and start the DMA transfer for requested command.
2309 */
2310 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2311 pp->ncq_flags |= ncq_saw_dmas;
2312 rc = nv_swncq_dmafis(ap);
2313 }
2314
2315 irq_exit:
2316 return;
2317 irq_error:
2318 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2319 ata_port_freeze(ap);
2320 return;
2321 }
2322
2323 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2324 {
2325 struct ata_host *host = dev_instance;
2326 unsigned int i;
2327 unsigned int handled = 0;
2328 unsigned long flags;
2329 u32 irq_stat;
2330
2331 spin_lock_irqsave(&host->lock, flags);
2332
2333 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2334
2335 for (i = 0; i < host->n_ports; i++) {
2336 struct ata_port *ap = host->ports[i];
2337
2338 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2339 if (ap->link.sactive) {
2340 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2341 handled = 1;
2342 } else {
2343 if (irq_stat) /* reserve Hotplug */
2344 nv_swncq_irq_clear(ap, 0xfff0);
2345
2346 handled += nv_host_intr(ap, (u8)irq_stat);
2347 }
2348 }
2349 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2350 }
2351
2352 spin_unlock_irqrestore(&host->lock, flags);
2353
2354 return IRQ_RETVAL(handled);
2355 }
2356
2357 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2358 {
2359 static int printed_version;
2360 const struct ata_port_info *ppi[] = { NULL, NULL };
2361 struct nv_pi_priv *ipriv;
2362 struct ata_host *host;
2363 struct nv_host_priv *hpriv;
2364 int rc;
2365 u32 bar;
2366 void __iomem *base;
2367 unsigned long type = ent->driver_data;
2368
2369 // Make sure this is a SATA controller by counting the number of bars
2370 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2371 // it's an IDE controller and we ignore it.
2372 for (bar = 0; bar < 6; bar++)
2373 if (pci_resource_start(pdev, bar) == 0)
2374 return -ENODEV;
2375
2376 if (!printed_version++)
2377 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2378
2379 rc = pcim_enable_device(pdev);
2380 if (rc)
2381 return rc;
2382
2383 /* determine type and allocate host */
2384 if (type == CK804 && adma_enabled) {
2385 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2386 type = ADMA;
2387 } else if (type == MCP5x && swncq_enabled) {
2388 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2389 type = SWNCQ;
2390 }
2391
2392 ppi[0] = &nv_port_info[type];
2393 ipriv = ppi[0]->private_data;
2394 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2395 if (rc)
2396 return rc;
2397
2398 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2399 if (!hpriv)
2400 return -ENOMEM;
2401 hpriv->type = type;
2402 host->private_data = hpriv;
2403
2404 /* request and iomap NV_MMIO_BAR */
2405 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2406 if (rc)
2407 return rc;
2408
2409 /* configure SCR access */
2410 base = host->iomap[NV_MMIO_BAR];
2411 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2412 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2413
2414 /* enable SATA space for CK804 */
2415 if (type >= CK804) {
2416 u8 regval;
2417
2418 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2419 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2420 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2421 }
2422
2423 /* init ADMA */
2424 if (type == ADMA) {
2425 rc = nv_adma_host_init(host);
2426 if (rc)
2427 return rc;
2428 } else if (type == SWNCQ)
2429 nv_swncq_host_init(host);
2430
2431 pci_set_master(pdev);
2432 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2433 IRQF_SHARED, ipriv->sht);
2434 }
2435
2436 #ifdef CONFIG_PM
2437 static int nv_pci_device_resume(struct pci_dev *pdev)
2438 {
2439 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2440 struct nv_host_priv *hpriv = host->private_data;
2441 int rc;
2442
2443 rc = ata_pci_device_do_resume(pdev);
2444 if (rc)
2445 return rc;
2446
2447 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2448 if (hpriv->type >= CK804) {
2449 u8 regval;
2450
2451 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2452 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2453 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2454 }
2455 if (hpriv->type == ADMA) {
2456 u32 tmp32;
2457 struct nv_adma_port_priv *pp;
2458 /* enable/disable ADMA on the ports appropriately */
2459 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2460
2461 pp = host->ports[0]->private_data;
2462 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2463 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2464 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2465 else
2466 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2467 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2468 pp = host->ports[1]->private_data;
2469 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2470 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2471 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2472 else
2473 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2474 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2475
2476 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2477 }
2478 }
2479
2480 ata_host_resume(host);
2481
2482 return 0;
2483 }
2484 #endif
2485
2486 static void nv_ck804_host_stop(struct ata_host *host)
2487 {
2488 struct pci_dev *pdev = to_pci_dev(host->dev);
2489 u8 regval;
2490
2491 /* disable SATA space for CK804 */
2492 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2493 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2494 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2495 }
2496
2497 static void nv_adma_host_stop(struct ata_host *host)
2498 {
2499 struct pci_dev *pdev = to_pci_dev(host->dev);
2500 u32 tmp32;
2501
2502 /* disable ADMA on the ports */
2503 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2504 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2505 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2506 NV_MCP_SATA_CFG_20_PORT1_EN |
2507 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2508
2509 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2510
2511 nv_ck804_host_stop(host);
2512 }
2513
2514 static int __init nv_init(void)
2515 {
2516 return pci_register_driver(&nv_pci_driver);
2517 }
2518
2519 static void __exit nv_exit(void)
2520 {
2521 pci_unregister_driver(&nv_pci_driver);
2522 }
2523
2524 module_init(nv_init);
2525 module_exit(nv_exit);
2526 module_param_named(adma, adma_enabled, bool, 0444);
2527 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2528 module_param_named(swncq, swncq_enabled, bool, 0444);
2529 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2530
This page took 0.083644 seconds and 5 git commands to generate.