pata_amd/pata_via: de-couple programming of PIO/MWDMA and UDMA timings
drivers/ata/pata_amd.c
/*
 * pata_amd.c - AMD PATA for new ATA layer
 * (C) 2005-2006 Red Hat Inc
 * Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.10"

/**
 * timing_setup - shared timing computation and load
 * @ap: ATA port being set up
 * @adev: drive being configured
 * @offset: port offset
 * @speed: target speed
 * @clock: clock multiplier (number of times 33MHz for this part)
 *
 * Perform the actual timing set up for Nvidia or AMD PATA devices.
 * The actual devices vary so they all call into this helper function
 * providing the clock multiplier and offset (because AMD and Nvidia put
 * the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* KHz. */
	u8 t;

	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be over conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */

	/* Configure the address set up timing */
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
	if (at.udma)
		pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
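
/*
 * Worked example of the arithmetic above (illustrative only, assuming the
 * usual libata timing table values): with the 33333 KHz base clock,
 * T = 1000000000 / 33333 ~= 30000 ps (30 ns) and, for any part with a
 * clock multiplier of 2 or more, UT = 15000 ps.  A UDMA/66 target
 * (30 ns cycle) then computes at.udma = 2 UT clocks, so on a clock == 2
 * part FIT(2, 2, 10) = 2, amd_cyc2udma[2] = 5, and the byte written to
 * the UDMA timing register is 0xc0 | 5 = 0xc5.
 */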

/**
 * amd_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}
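
/*
 * Note (illustrative, based on the masks above): config byte 0x42 holds the
 * 80-wire detect flags, bits 1:0 for the primary channel and bits 3:2 for
 * the secondary, presumably one bit per drive; any set bit in a channel's
 * pair is taken to mean an 80-wire cable is present.
 */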

/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 * amd33_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}

/**
 * nv_pre_reset - reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the Nvidia boxes BIOS
	   doesn't always set the cable bits but does set mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;
	/* And a triple check across suspend/resume with ACPI around */
	if (ata_acpi_cbl_80wire(ap))
		cbl = ATA_CBL_PATA80;
	return cbl;
}
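
/*
 * Reader note (inferred from the register offsets used above): the Nvidia
 * parts appear to mirror the AMD register layout shifted from 0x40 to 0x50,
 * with the enable bits at 0x50, the cable bits at 0x52 and the timing block
 * passed to timing_setup() at offset 0x50, which is presumably why both
 * families can share the same timing helper.
 */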

/**
 * nv100_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 * nv100_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_40wire,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = amd_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_sff_port_start,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];
	return ata_pci_init_one(pdev, ppi);
}
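
/*
 * Illustrative walk-through of the probe above: an AMD 8111 IDE function
 * enters with driver_data 5 (UDMA/133, amd133_port_ops); if its subsystem
 * IDs identify an AMD Serenade board it is remapped to entry 6, which keeps
 * the same port_ops but caps the UDMA mask at UDMA/100.
 */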

#ifdef CONFIG_PM
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);