/*
 * pata_amd.c - AMD PATA for new ATA layer
 * (C) 2005-2006 Red Hat Inc
 * Alan Cox <alan@redhat.com>
 *
 * Based on pata-sil680. Errata information is taken from data sheets
 * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
 * claimed by sata-nv.c.
 *
 * TODO:
 *	Variable system clock when/if it makes sense
 *	Power management on ports
 *
 *
 * Documentation publicly available.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.3.9"

/**
 * timing_setup - shared timing computation and load
 * @ap: ATA port being set up
 * @adev: drive being configured
 * @offset: port offset
 * @speed: target speed
 * @clock: clock multiplier (number of times 33MHz for this part)
 *
 * Perform the actual timing set up for Nvidia or AMD PATA devices.
 * The actual devices vary so they all call into this helper function
 * providing the clock multiplier and offset (because AMD and Nvidia put
 * the ports at different locations).
 */

static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
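	/*
	 * Map a UDMA cycle count (in UDMA clock units) to the timing field
	 * encoding used by the UDMA66 and later parts (clock cases 2-4
	 * below).
	 */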
	static const unsigned char amd_cyc2udma[] = {
		6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
	};

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *peer = ata_dev_pair(adev);
	int dn = ap->port_no * 2 + adev->devno;
	struct ata_timing at, apeer;
	int T, UT;
	const int amd_clock = 33333;	/* kHz. */
	u8 t;

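	/*
	 * T is the cycle time of the 33MHz PATA clock; UT is the UDMA
	 * timing unit, T for single-clock parts and T/2 for chips that run
	 * the UDMA state machine at twice the base clock.
	 */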
	T = 1000000000 / amd_clock;
	UT = T / min_t(int, max_t(int, clock, 1), 2);

	if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
		return;
	}

	if (peer) {
		/* This may be overly conservative */
		if (peer->dma_mode) {
			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
			ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
		}
		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
		ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
	}

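	/*
	 * UDMA/100 and UDMA/133 are special-cased: the forced cycle values
	 * index the dedicated amd_cyc2udma[] encodings for those modes
	 * rather than a clock count computed from the base clock.
	 */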
	if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
	if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;

	/*
	 * Now do the setup work
	 */

	/* Configure the address set up timing */
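	/*
	 * Each drive has a two bit field in this register (drive 0 in the
	 * top bits); the stored value is the setup clock count minus one,
	 * clamped to 1-4 clocks.
	 */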
	pci_read_config_byte(pdev, offset + 0x0C, &t);
	t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
	pci_write_config_byte(pdev, offset + 0x0C, t);

	/* Configure the 8bit I/O timing */
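	/*
	 * One byte per channel: 8bit (taskfile) active count in the high
	 * nibble, recovery count in the low nibble, each stored minus one.
	 */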
	pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
		((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));

	/* Drive timing */
	pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
		((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));

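	/*
	 * Encode the UDMA timing byte. The clock argument identifies the
	 * controller generation: 1 = UDMA33, 2 = UDMA66, 3 = UDMA100 and
	 * 4 = UDMA133 class parts, each with its own field encoding.
	 */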
	switch (clock) {
	case 1:
		t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
		break;

	case 2:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
		break;

	case 3:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
		break;

	case 4:
		t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
		break;

	default:
		return;
	}

	/* UDMA timing */
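	/*
	 * One control/timing byte per drive: 0xc0 plus the encoded cycle
	 * when UDMA is in use, 0x03 when the drive stays on PIO/MWDMA
	 * timings.
	 */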
	pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}

/**
 * amd_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
{
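	/*
	 * PCI config register 0x40 records which channels the BIOS left
	 * enabled: bit 1 for the primary port, bit 0 for the secondary.
	 */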
	static const struct pci_bits amd_enable_bits[] = {
		{ 0x40, 1, 0x02, 0x02 },
		{ 0x40, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void amd_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, amd_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

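/**
 * amd_cable_detect - report cable type
 * @ap: ATA interface
 *
 * Used by the UDMA133 capable AMD parts. Config register 0x42 holds two
 * cable bits per channel; any set bit is treated as an 80 wire cable.
 */
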
static int amd_cable_detect(struct ata_port *ap)
{
	static const u32 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;

	pci_read_config_byte(pdev, 0x42, &ata66);
	if (ata66 & bitmask[ap->port_no])
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}

/**
 * amd33_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the AMD registers for PIO mode.
 */

static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}

static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}

static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}

static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}

/**
 * amd33_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the AMD and Nvidia
 * chipset.
 */

static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}

static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}

static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}

static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}


/**
 * nv_pre_reset - perform reset handling
 * @link: ATA link
 * @deadline: deadline jiffies for the operation
 *
 * Reset sequence checking enable bits to see which ports are
 * active.
 */

static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits nv_enable_bits[] = {
		{ 0x50, 1, 0x02, 0x02 },
		{ 0x50, 1, 0x01, 0x01 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, nv_pre_reset,
			   ata_std_softreset, NULL,
			   ata_std_postreset);
}

static int nv_cable_detect(struct ata_port *ap)
{
	static const u8 bitmask[2] = {0x03, 0x0C};
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 ata66;
	u16 udma;
	int cbl;

	pci_read_config_byte(pdev, 0x52, &ata66);
	if (ata66 & bitmask[ap->port_no])
		cbl = ATA_CBL_PATA80;
	else
		cbl = ATA_CBL_PATA40;

	/* We now have to double check because the BIOS on Nvidia boards
	   doesn't always set the cable bits but does set the mode bits */
	pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
	if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
		cbl = ATA_CBL_PATA80;
	/* And a triple check across suspend/resume with ACPI around */
	if (ata_acpi_cbl_80wire(ap))
		cbl = ATA_CBL_PATA80;
	return cbl;
}

/**
 * nv100_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the Nvidia registers for PIO mode.
 */

static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}

static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}

/**
 * nv100_set_dmamode - set initial DMA mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Program the MWDMA/UDMA modes for the Nvidia
 * chipsets.
 */

static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}

static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}

static struct scsi_host_template amd_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct ata_port_operations amd33_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd33_set_piomode,
	.set_dmamode = amd33_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_40wire,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static struct ata_port_operations amd66_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd66_set_piomode,
	.set_dmamode = amd66_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static struct ata_port_operations amd100_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd100_set_piomode,
	.set_dmamode = amd100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = ata_cable_unknown,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static struct ata_port_operations amd133_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = amd133_set_piomode,
	.set_dmamode = amd133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = amd_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = amd_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static struct ata_port_operations nv100_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = nv100_set_piomode,
	.set_dmamode = nv100_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static struct ata_port_operations nv133_port_ops = {
	.port_disable = ata_port_disable,
	.set_piomode = nv133_set_piomode,
	.set_dmamode = nv133_set_dmamode,
	.mode_filter = ata_pci_default_filter,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.cable_detect = nv_cable_detect,

	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,

	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,

	.data_xfer = ata_data_xfer,

	.irq_handler = ata_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,

	.port_start = ata_port_start,
};

static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info[10] = {
		{ /* 0: AMD 7401 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,	/* No SWDMA */
			.udma_mask = 0x07,	/* UDMA 33 */
			.port_ops = &amd33_port_ops
		},
		{ /* 1: Early AMD7409 - no swdma */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 2: AMD 7409, no swdma errata */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA4,	/* UDMA 66 */
			.port_ops = &amd66_port_ops
		},
		{ /* 3: AMD 7411 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 4: AMD 7441 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		},
		{ /* 5: AMD 8111 */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 6: AMD 8111 UDMA 100 (Serenade) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100, no swdma */
			.port_ops = &amd133_port_ops
		},
		{ /* 7: Nvidia Nforce */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &nv100_port_ops
		},
		{ /* 8: Nvidia Nforce2 and later */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA6,	/* UDMA 133, no swdma */
			.port_ops = &nv133_port_ops
		},
		{ /* 9: AMD CS5536 (Geode companion) */
			.sht = &amd_sht,
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = 0x1f,
			.mwdma_mask = 0x07,
			.udma_mask = ATA_UDMA5,	/* UDMA 100 */
			.port_ops = &amd100_port_ops
		}
	};
	const struct ata_port_info *ppi[] = { NULL, NULL };
	static int printed_version;
	int type = id->driver_data;
	u8 fifo;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

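	/*
	 * Config register 0x41 holds the FIFO enables in its top nibble;
	 * the FIFO is switched off below for the AMD 7411, whose FIFO is
	 * broken, and on for everything else.
	 */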
	pci_read_config_byte(pdev, 0x41, &fifo);

	/* Check for AMD7409 without swdma errata and if found adjust type */
	if (type == 1 && pdev->revision > 0x7)
		type = 2;

	/* Check for AMD7411 */
	if (type == 3)
		/* FIFO is broken */
		pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
	else
		pci_write_config_byte(pdev, 0x41, fifo | 0xF0);

	/* Serenade ? */
	if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
	    pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
		type = 6;	/* UDMA 100 only */

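	/*
	 * The early (pre-7411) parts report simplex-only DMA; clear the
	 * flag so both channels can use DMA.
	 */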
	if (type < 3)
		ata_pci_clear_simplex(pdev);

	/* And fire it up */
	ppi[0] = &info[type];
	return ata_pci_init_one(pdev, ppi);
}

#ifdef CONFIG_PM
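/*
 * On resume the controller's config space may have been reset, so redo
 * the FIFO and simplex fixups applied at probe time before handing off
 * to the generic libata PCI resume path.
 */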
static int amd_reinit_one(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_AMD) {
		u8 fifo;
		pci_read_config_byte(pdev, 0x41, &fifo);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
			/* FIFO is broken */
			pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
		else
			pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
		if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
		    pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
			ata_pci_clear_simplex(pdev);
	}
	return ata_pci_device_resume(pdev);
}
#endif

static const struct pci_device_id amd[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },

	{ },
};

static struct pci_driver amd_pci_driver = {
	.name = DRV_NAME,
	.id_table = amd,
	.probe = amd_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ata_pci_device_suspend,
	.resume = amd_reinit_one,
#endif
};

static int __init amd_init(void)
{
	return pci_register_driver(&amd_pci_driver);
}

static void __exit amd_exit(void)
{
	pci_unregister_driver(&amd_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);

module_init(amd_init);
module_exit(amd_exit);