/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

/* ADMA Physical Region Descriptor - one SG segment */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	struct nv_adma_prd	*aprd;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
struct nv_host_priv {
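
/* true when the per-port ADMA interrupt bit is set in the ADMA general
   control register (one bit per port, spaced 12 bits apart, starting at
   bit 19) */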
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
static struct pci_driver nv_pci_driver = {
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
static struct ata_port_info nv_port_info[] = {
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,

		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,

		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,

		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int adma_enabled = 1;
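
/* Take the port out of ADMA mode and back to legacy register mode: wait for
   the controller to go idle, clear the GO bit, then wait until it reports
   LEGACY status. */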
static void nv_adma_register_mode(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
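
/* Switch the port into ADMA mode: set the GO bit and wait until the LEGACY
   flag clears and the controller reports IDLE. */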
static void nv_adma_mode(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
static int nv_adma_slave_config(struct scsi_device *sdev)
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		nv_adma_register_mode(ap);

		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;

		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);
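
/* Encode a taskfile as the CPB's register-update list: each 16-bit entry is
   (ATA register << 8) | value, with WNB set on the first entry, CMDEND on the
   command entry, and IGN marking entries to be skipped. */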
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

		cpb[idx++] = cpu_to_le16(IGN);
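
/* Examine one CPB's response flags: on an error flag (or a forced error from
   the error notifier) record the cause in the port's EH info; on DONE,
   complete the queued command for that tag. */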
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;

			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;

		/* Kill all commands. EH will determine what actually failed. */

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);

			DPRINTK("Completing qc from tag %d\n",cpb_num);

			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);

	/* handle interrupt */
	return ata_host_intr(ap, qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {

				if(ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos) );
					check_commands &= ~(1 << pos );

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
static void nv_adma_irq_clear(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
static int nv_adma_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;

	rc = ata_port_start(ap);

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);

	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb_dma = mem_dma;
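
	/* program the CPB base address registers; the split 16-bit shifts pick
	   out the high dword without shifting a 32-bit dma_addr_t by 32 */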
	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */

	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
static void nv_adma_port_stop(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	writew(0, mmio + NV_ADMA_CTL);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);
static int nv_adma_port_resume(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */

	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw( mmio + NV_ADMA_CTL );	/* flush posted write */
static void nv_adma_setup_port(struct ata_port *ap)
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
static int nv_adma_host_init(struct ata_host *host)
	struct pci_dev *pdev = to_pci_dev(host->dev);

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;

		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);

		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));

		cpb->next_aprd = cpu_to_le64(0);
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING) ||
	   (qc->flags & ATA_QCFLAG_RESULT_TF))

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);

	cpb->resp_flags = NV_CPB_RESP_DONE;

	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;

		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	cpb->ctl_flags = ctl_flags;

	cpb->resp_flags = 0;
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);

	nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		pp->last_issue_ncq = curr_ncq;

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);

				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;

	return IRQ_RETVAL(handled);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
	if (sc_reg > SCR_CONTROL)

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
	if (sc_reg > SCR_CONTROL)

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
static void nv_nf2_freeze(struct ata_port *ap)
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);

static void nv_nf2_thaw(struct ata_port *ap)
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
static void nv_ck804_freeze(struct ata_port *ap)
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);

static void nv_ck804_thaw(struct ata_port *ap)
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
static int nv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy, deadline);

static void nv_error_handler(struct ata_port *ap)
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
static void nv_adma_error_handler(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw( mmio + NV_ADMA_CTL );	/* flush posted write */

		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw( mmio + NV_ADMA_CTL );	/* flush posted write */

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	static int printed_version = 0;
	const struct ata_port_info *ppi[2];
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");

	ppi[0] = ppi[1] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, 2, &host);

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

		rc = nv_adma_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
static void nv_remove_one (struct pci_dev *pdev)
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
static int nv_pci_device_resume(struct pci_dev *pdev)
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	rc = ata_pci_device_do_resume(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

		if(hpriv->type == ADMA) {
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	ata_host_resume(host);
static void nv_ck804_host_stop(struct ata_host *host)
	struct pci_dev *pdev = to_pci_dev(host->dev);

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

static void nv_adma_host_stop(struct ata_host *host)
	struct pci_dev *pdev = to_pci_dev(host->dev);

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
static int __init nv_init(void)
	return pci_register_driver(&nv_pci_driver);

static void __exit nv_exit(void)
	pci_unregister_driver(&nv_pci_driver);

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");