/*
 *      IDE I/O functions
 *
 *      Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
                             int uptodate, unsigned int nr_bytes, int dequeue)
{
        int ret = 1;
        int error = 0;

        if (uptodate <= 0)
                error = uptodate ? uptodate : -EIO;

        /*
         * if failfast is set on a request, override number of sectors and
         * complete the whole request right now
         */
        if (blk_noretry_request(rq) && error)
                nr_bytes = rq->hard_nr_sectors << 9;

        if (!blk_fs_request(rq) && error && !rq->errors)
                rq->errors = -EIO;

        /*
         * decide whether to reenable DMA -- 3 is a random magic for now,
         * if we DMA timeout more than 3 times, just stay in PIO
         */
        if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
            drive->retry_pio <= 3) {
                drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
                ide_dma_on(drive);
        }

        if (!__blk_end_request(rq, error, nr_bytes)) {
                if (dequeue)
                        HWGROUP(drive)->rq = NULL;
                ret = 0;
        }

        return ret;
}
/**
 *      ide_end_request         -       complete an IDE I/O
 *      @drive: IDE device for the I/O
 *      @uptodate: completion status (<= 0 indicates failure)
 *      @nr_sectors: number of sectors completed
 *
 *      This is our end_request wrapper function. We complete the I/O,
 *      update the random number input and dequeue the request, which if
 *      it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
        unsigned int nr_bytes = nr_sectors << 9;
        struct request *rq = drive->hwif->hwgroup->rq;
        unsigned long flags;
        int ret = 1;

        if (!nr_bytes) {
                if (blk_pc_request(rq))
                        nr_bytes = rq->data_len;
                else
                        nr_bytes = rq->hard_cur_sectors << 9;
        }

        spin_lock_irqsave(&ide_lock, flags);
        ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
        spin_unlock_irqrestore(&ide_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ide_end_request);
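
/*
 * Usage sketch (hypothetical caller, not part of this file; example_intr()
 * and its logic are made up for illustration): a media driver's interrupt
 * handler typically completes one chunk per good interrupt and lets
 * ide_error() deal with bad status:
 *
 *      static ide_startstop_t example_intr(ide_drive_t *drive)
 *      {
 *              ide_hwif_t *hwif = drive->hwif;
 *              u8 stat = hwif->tp_ops->read_status(hwif);
 *
 *              if (!OK_STAT(stat, ATA_DRDY, BAD_STAT))
 *                      return ide_error(drive, "example_intr", stat);
 *
 *              // one good sector; ide_end_request() returns 0 once the
 *              // whole request has been completed and dequeued
 *              ide_end_request(drive, 1, 1);
 *              return ide_stopped;
 *      }
 */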
static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
        struct request_pm_state *pm = rq->data;

#ifdef DEBUG_PM
        printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
               drive->name, pm->pm_step);
#endif
        if (drive->media != ide_disk)
                return;

        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
                if (pm->pm_state == PM_EVENT_FREEZE)
                        pm->pm_step = IDE_PM_COMPLETED;
                else
                        pm->pm_step = IDE_PM_STANDBY;
                break;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
                pm->pm_step = IDE_PM_COMPLETED;
                break;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                pm->pm_step = IDE_PM_IDLE;
                break;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
                pm->pm_step = IDE_PM_RESTORE_DMA;
                break;
        }
}
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
        struct request_pm_state *pm = rq->data;
        ide_task_t *args = rq->special;

        memset(args, 0, sizeof(*args));

        switch (pm->pm_step) {
        case IDE_PM_FLUSH_CACHE:        /* Suspend step 1 (flush cache) */
                if (drive->media != ide_disk)
                        break;
                /* Not supported? Switch to next step now. */
                if (ata_id_flush_enabled(drive->id) == 0 ||
                    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
                        ide_complete_power_step(drive, rq);
                        return ide_stopped;
                }
                if (ata_id_flush_ext_enabled(drive->id))
                        args->tf.command = ATA_CMD_FLUSH_EXT;
                else
                        args->tf.command = ATA_CMD_FLUSH;
                goto out_do_tf;
        case IDE_PM_STANDBY:            /* Suspend step 2 (standby) */
                args->tf.command = ATA_CMD_STANDBYNOW1;
                goto out_do_tf;
        case IDE_PM_RESTORE_PIO:        /* Resume step 1 (restore PIO) */
                ide_set_max_pio(drive);
                /*
                 * skip IDE_PM_IDLE for ATAPI devices
                 */
                if (drive->media != ide_disk)
                        pm->pm_step = IDE_PM_RESTORE_DMA;
                else
                        ide_complete_power_step(drive, rq);
                return ide_stopped;
        case IDE_PM_IDLE:               /* Resume step 2 (idle) */
                args->tf.command = ATA_CMD_IDLEIMMEDIATE;
                goto out_do_tf;
        case IDE_PM_RESTORE_DMA:        /* Resume step 3 (restore DMA) */
                /*
                 * Right now, all we do is call ide_set_dma(drive),
                 * we could be smarter and check for current xfer_speed
                 * in struct drive etc...
                 */
                if (drive->hwif->dma_ops == NULL)
                        break;
                /*
                 * TODO: respect IDE_DFLAG_USING_DMA
                 */
                ide_set_dma(drive);
                break;
        }

        pm->pm_step = IDE_PM_COMPLETED;
        return ide_stopped;

out_do_tf:
        args->tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
        args->data_phase = TASKFILE_NO_DATA;
        return do_rw_taskfile(drive, args);
}
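
/*
 * Walkthrough of the two step functions above (step names as used here;
 * the exact enum layout lives in <linux/ide.h>): a suspend request steps
 * through
 *
 *      IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *
 * and a resume request through
 *
 *      IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA ->
 *      IDE_PM_COMPLETED
 *
 * ide_start_power_step() issues the taskfile for the current step and
 * ide_complete_power_step() advances pm->pm_step once that command has
 * finished; PM_EVENT_FREEZE skips the standby step entirely.
 */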
/**
 *      ide_end_dequeued_request        -       complete an IDE I/O
 *      @drive: IDE device for the I/O
 *      @rq: request being completed
 *      @uptodate: completion status (<= 0 indicates failure)
 *      @nr_sectors: number of sectors completed
 *
 *      Complete an I/O that is no longer on the request queue. This
 *      typically occurs when we pull the request and issue a REQUEST_SENSE.
 *      We must still finish the old request but we must not tamper with the
 *      queue in the meantime.
 *
 *      NOTE: This path does not handle barrier, but barrier is not supported
 *      here anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
                             int uptodate, int nr_sectors)
{
        unsigned long flags;
        int ret;

        BUG_ON(!blk_rq_started(rq));

        spin_lock_irqsave(&ide_lock, flags);
        ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
        spin_unlock_irqrestore(&ide_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
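
/*
 * Usage sketch (modelled on an ATAPI sense path; all names illustrative):
 * the caller has already taken the failed request off the queue, so the
 * normal dequeueing in __ide_end_request() must be skipped:
 *
 *      failed_rq = hwgroup->rq;
 *      hwgroup->rq = NULL;             // caller now owns failed_rq
 *      ... issue and complete a REQUEST_SENSE for failed_rq ...
 *      ide_end_dequeued_request(drive, failed_rq, 0,
 *                               failed_rq->hard_nr_sectors);
 */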
/**
 *      ide_complete_pm_request -       end the current Power Management request
 *      @drive: target drive
 *      @rq: request
 *
 *      This function cleans up the current PM request and stops the queue
 *      if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
        unsigned long flags;

#ifdef DEBUG_PM
        printk("%s: completing PM request, %s\n", drive->name,
               blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
        spin_lock_irqsave(&ide_lock, flags);
        if (blk_pm_suspend_request(rq)) {
                blk_stop_queue(drive->queue);
        } else {
                drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
                blk_start_queue(drive->queue);
        }
        spin_unlock_irqrestore(&ide_lock, flags);

        drive->hwif->hwgroup->rq = NULL;

        spin_lock_irqsave(&ide_lock, flags);
        if (__blk_end_request(rq, 0, 0))
                BUG();
        spin_unlock_irqrestore(&ide_lock, flags);
}
/**
 *      ide_end_drive_cmd       -       end an explicit drive command
 *      @drive: drive the command completed on
 *      @stat: status bits
 *      @err: error bits
 *
 *      Clean up after success/failure of an explicit drive command.
 *      These get thrown onto the queue so they are synchronized with
 *      real I/O operations on the drive.
 *
 *      In LBA48 mode we have to read the register set twice to get
 *      all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
        ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
        struct request *rq = hwgroup->rq;
        unsigned long flags;

        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *task = (ide_task_t *)rq->special;

                if (rq->errors == 0)
                        rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);

                if (task) {
                        struct ide_taskfile *tf = &task->tf;

                        tf->error = err;
                        tf->status = stat;

                        drive->hwif->tp_ops->tf_read(drive, task);

                        if (task->tf_flags & IDE_TFLAG_DYN)
                                kfree(task);
                }
        } else if (blk_pm_request(rq)) {
                struct request_pm_state *pm = rq->data;

                ide_complete_power_step(drive, rq);
                if (pm->pm_step == IDE_PM_COMPLETED)
                        ide_complete_pm_request(drive, rq);
                return;
        }

        hwgroup->rq = NULL;

        rq->errors = err;

        spin_lock_irqsave(&ide_lock, flags);
        if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
                                       blk_rq_bytes(rq))))
                BUG();
        spin_unlock_irqrestore(&ide_lock, flags);
}
EXPORT_SYMBOL(ide_end_drive_cmd);
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
        if (rq->rq_disk) {
                ide_driver_t *drv;

                drv = *(ide_driver_t **)rq->rq_disk->private_data;
                drv->end_request(drive, 0, 0);
        } else
                ide_end_request(drive, 0, 0);
}
static ide_startstop_t
ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
        ide_hwif_t *hwif = drive->hwif;

        if ((stat & ATA_BUSY) ||
            ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
                /* other bits are useless when BUSY */
                rq->errors |= ERROR_RESET;
        } else if (stat & ATA_ERR) {
                /* err has different meaning on cdrom and tape */
                if (err == ATA_ABORTED) {
                        if ((drive->dev_flags & IDE_DFLAG_LBA) &&
                            /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
                            hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
                                return ide_stopped;
                } else if ((err & BAD_CRC) == BAD_CRC) {
                        /* UDMA crc error, just retry the operation */
                        drive->crc_count++;
                } else if (err & (ATA_BBK | ATA_UNC)) {
                        /* retries won't help these */
                        rq->errors = ERROR_MAX;
                } else if (err & ATA_TRK0NF) {
                        /* help it find track zero */
                        rq->errors |= ERROR_RECAL;
                }
        }

        if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
            (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
                int nsect = drive->mult_count ? drive->mult_count : 1;

                ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
        }

        if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
                ide_kill_rq(drive, rq);
                return ide_stopped;
        }

        if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
                rq->errors |= ERROR_RESET;

        if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
                ++rq->errors;
                return ide_do_reset(drive);
        }

        if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
                drive->special.b.recalibrate = 1;

        ++rq->errors;

        return ide_stopped;
}
static ide_startstop_t
ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
        ide_hwif_t *hwif = drive->hwif;

        if ((stat & ATA_BUSY) ||
            ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
                /* other bits are useless when BUSY */
                rq->errors |= ERROR_RESET;
        } else {
                /* add decoding error stuff */
        }

        if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
                /* force an abort */
                hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

        if (rq->errors >= ERROR_MAX) {
                ide_kill_rq(drive, rq);
        } else {
                if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
                        ++rq->errors;
                        return ide_do_reset(drive);
                }
                ++rq->errors;
        }

        return ide_stopped;
}
ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
        if (drive->media == ide_disk)
                return ide_ata_error(drive, rq, stat, err);
        return ide_atapi_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(__ide_error);
/**
 *      ide_error       -       handle an error on the IDE
 *      @drive: drive the error occurred on
 *      @msg: message to report
 *      @stat: status bits
 *
 *      ide_error() takes action based on the error returned by the drive.
 *      For normal I/O that may well include retries. We deal with
 *      both new-style (taskfile) and old style command handling here.
 *      In the case of taskfile command handling there is work left to
 *      do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
        struct request *rq;
        u8 err;

        err = ide_dump_status(drive, msg, stat);

        if ((rq = HWGROUP(drive)->rq) == NULL)
                return ide_stopped;

        /* retry only "normal" I/O: */
        if (!blk_fs_request(rq)) {
                rq->errors = 1;
                ide_end_drive_cmd(drive, stat, err);
                return ide_stopped;
        }

        if (rq->rq_disk) {
                ide_driver_t *drv;

                drv = *(ide_driver_t **)rq->rq_disk->private_data;
                return drv->error(drive, rq, stat, err);
        } else
                return __ide_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(ide_error);
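
/*
 * Usage sketch (hypothetical media driver, illustration only): a driver
 * funnels bad status from its interrupt handler straight into ide_error(),
 * and its ->error method can simply delegate back to __ide_error():
 *
 *      static ide_startstop_t example_error(ide_drive_t *drive,
 *                                           struct request *rq,
 *                                           u8 stat, u8 err)
 *      {
 *              return __ide_error(drive, rq, stat, err);
 *      }
 */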
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->sect;
        tf->lbal    = drive->sect;
        tf->lbam    = drive->cyl;
        tf->lbah    = drive->cyl >> 8;
        tf->device  = (drive->head - 1) | drive->select;
        tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->sect;
        tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
        tf->nsect   = drive->mult_req;
        tf->command = ATA_CMD_SET_MULTI;
}
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
        special_t *s = &drive->special;
        ide_task_t args;

        memset(&args, 0, sizeof(ide_task_t));
        args.data_phase = TASKFILE_NO_DATA;

        if (s->b.set_geometry) {
                s->b.set_geometry = 0;
                ide_tf_set_specify_cmd(drive, &args.tf);
        } else if (s->b.recalibrate) {
                s->b.recalibrate = 0;
                ide_tf_set_restore_cmd(drive, &args.tf);
        } else if (s->b.set_multmode) {
                s->b.set_multmode = 0;
                ide_tf_set_setmult_cmd(drive, &args.tf);
        } else if (s->all) {
                int special = s->all;
                s->all = 0;
                printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
                return ide_stopped;
        }

        args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
                        IDE_TFLAG_CUSTOM_HANDLER;

        do_rw_taskfile(drive, &args);

        return ide_started;
}
/**
 *      do_special      -       issue some special commands
 *      @drive: drive the command is for
 *
 *      do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *      ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 *      It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
        special_t *s = &drive->special;

#ifdef DEBUG
        printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
        if (drive->media == ide_disk)
                return ide_disk_special(drive);

        s->all = 0;
        drive->mult_req = 0;

        return ide_stopped;
}
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;

        if (hwif->sg_mapped)    /* needed by ide-scsi */
                return;

        if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
                hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
        } else {
                sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
                hwif->sg_nents = 1;
        }
}
EXPORT_SYMBOL_GPL(ide_map_sg);
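
/*
 * Usage sketch (mirrors what execute_drive_cmd() below does for data
 * phases): before starting PIO or programming a DMA engine, a caller
 * first sizes the transfer and then builds the scatterlist:
 *
 *      ide_init_sg_cmd(drive, rq);     // reset nsect/nleft from the rq
 *      ide_map_sg(drive, rq);          // fill hwif->sg_table / sg_nents
 */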
void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = drive->hwif;

        hwif->nsect = hwif->nleft = rq->nr_sectors;
        hwif->cursg_ofs = 0;
        hwif->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
/**
 *      execute_drive_cmd       -       issue special drive command
 *      @drive: the drive to issue the command on
 *      @rq: the request structure holding the command
 *
 *      execute_drive_cmd() issues a special drive command, usually
 *      initiated by ioctl() from the external hdparm program. The
 *      command can be a drive command, drive task or taskfile
 *      operation. Weirdly you can call it with NULL to wait for
 *      all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
                struct request *rq)
{
        ide_hwif_t *hwif = HWIF(drive);
        ide_task_t *task = rq->special;

        if (task) {
                hwif->data_phase = task->data_phase;

                switch (hwif->data_phase) {
                case TASKFILE_MULTI_OUT:
                case TASKFILE_OUT:
                case TASKFILE_MULTI_IN:
                case TASKFILE_IN:
                        ide_init_sg_cmd(drive, rq);
                        ide_map_sg(drive, rq);
                default:
                        break;
                }

                return do_rw_taskfile(drive, task);
        }

        /*
         * NULL is actually a valid way of waiting for
         * all current requests to be flushed from the queue.
         */
#ifdef DEBUG
        printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
        ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
                          ide_read_error(drive));

        return ide_stopped;
}
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
                       int arg)
{
        struct request_queue *q = drive->queue;
        struct request *rq;
        int ret = 0;

        if (!(setting->flags & DS_SYNC))
                return setting->set(drive, arg);

        rq = blk_get_request(q, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd_len = 5;
        rq->cmd[0] = REQ_DEVSET_EXEC;
        *(int *)&rq->cmd[1] = arg;
        rq->special = setting->set;

        if (blk_execute_rq(q, NULL, rq, 0))
                ret = rq->errors;
        blk_put_request(rq);

        return ret;
}
EXPORT_SYMBOL_GPL(ide_devset_execute);
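
/*
 * Usage sketch (hypothetical setting, illustration only): a setting whose
 * ->set method must run from the request queue is marked DS_SYNC; the call
 * below then round-trips through a REQ_DEVSET_EXEC request, which
 * ide_special_rq() executes on the drive's behalf:
 *
 *      ret = ide_devset_execute(drive, example_devset, 1);
 *
 * where example_devset is a made-up const struct ide_devset * whose flags
 * include DS_SYNC.
 */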
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
        u8 cmd = rq->cmd[0];

        if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
                ide_task_t task;
                struct ide_taskfile *tf = &task.tf;

                memset(&task, 0, sizeof(task));
                if (cmd == REQ_PARK_HEADS) {
                        drive->sleep = *(unsigned long *)rq->special;
                        drive->dev_flags |= IDE_DFLAG_SLEEPING;
                        tf->command = ATA_CMD_IDLEIMMEDIATE;
                        tf->feature = 0x44;
                        tf->lbal = 0x4c;
                        tf->lbam = 0x4e;
                        tf->lbah = 0x55;
                        task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
                } else          /* cmd == REQ_UNPARK_HEADS */
                        tf->command = ATA_CMD_CHK_POWER;

                task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

                drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
                return do_rw_taskfile(drive, &task);
        }

        switch (cmd) {
        case REQ_DEVSET_EXEC:
        {
                int err, (*setfunc)(ide_drive_t *, int) = rq->special;

                err = setfunc(drive, *(int *)&rq->cmd[1]);
                if (err)
                        rq->errors = err;
                else
                        err = 1;
                ide_end_request(drive, err, 0);
                return ide_stopped;
        }
        case REQ_DRIVE_RESET:
                return ide_do_reset(drive);
        default:
                blk_dump_rq_flags(rq, "ide_special_rq - bad request");
                ide_end_request(drive, 0, 0);
                return ide_stopped;
        }
}
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
        struct request_pm_state *pm = rq->data;

        if (blk_pm_suspend_request(rq) &&
            pm->pm_step == IDE_PM_START_SUSPEND)
                /* Mark drive blocked when starting the suspend sequence. */
                drive->dev_flags |= IDE_DFLAG_BLOCKED;
        else if (blk_pm_resume_request(rq) &&
                 pm->pm_step == IDE_PM_START_RESUME) {
                /*
                 * The first thing we do on wakeup is to wait for BSY bit to
                 * go away (with a looong timeout) as a drive on this hwif may
                 * just be POSTing itself.
                 * We do that before even selecting as the "other" device on
                 * the bus may be broken enough to walk on our toes at this
                 * point.
                 */
                ide_hwif_t *hwif = drive->hwif;
                int rc;
#ifdef DEBUG_PM
                printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
                rc = ide_wait_not_busy(hwif, 35000);
                if (rc)
                        printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
                SELECT_DRIVE(drive);
                hwif->tp_ops->set_irq(hwif, 1);
                rc = ide_wait_not_busy(hwif, 100000);
                if (rc)
                        printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
        }
}
/**
 *      start_request   -       start of I/O and command issuing for IDE
 *
 *      start_request() initiates handling of a new I/O request. It
 *      accepts commands and I/O (read/write) requests.
 *
 *      FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
        ide_startstop_t startstop;

        BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
        printk("%s: start_request: current=0x%08lx\n",
                HWIF(drive)->name, (unsigned long) rq);
#endif

        /* bail early if we've exceeded max_failures */
        if (drive->max_failures && (drive->failures > drive->max_failures)) {
                rq->cmd_flags |= REQ_FAILED;
                goto kill_rq;
        }

        if (blk_pm_request(rq))
                ide_check_pm_state(drive, rq);

        SELECT_DRIVE(drive);
        if (ide_wait_stat(&startstop, drive, drive->ready_stat,
                          ATA_BUSY | ATA_DRQ, WAIT_READY)) {
                printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
                return startstop;
        }
        if (!drive->special.all) {
                ide_driver_t *drv;

                /*
                 * We reset the drive so we need to issue a SETFEATURES.
                 * Do it _after_ do_special() restored device parameters.
                 */
                if (drive->current_speed == 0xff)
                        ide_config_drive_speed(drive, drive->desired_speed);

                if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
                        return execute_drive_cmd(drive, rq);
                else if (blk_pm_request(rq)) {
                        struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
                        printk("%s: start_power_step(step: %d)\n",
                                drive->name, pm->pm_step);
#endif
                        startstop = ide_start_power_step(drive, rq);
                        if (startstop == ide_stopped &&
                            pm->pm_step == IDE_PM_COMPLETED)
                                ide_complete_pm_request(drive, rq);
                        return startstop;
                } else if (!rq->rq_disk && blk_special_request(rq))
                        /*
                         * TODO: Once all ULDs have been modified to
                         * check for specific op codes rather than
                         * blindly accepting any special request, the
                         * check for ->rq_disk above may be replaced
                         * by a more suitable mechanism or even
                         * dropped entirely.
                         */
                        return ide_special_rq(drive, rq);

                drv = *(ide_driver_t **)rq->rq_disk->private_data;

                return drv->do_request(drive, rq, rq->sector);
        }
        return do_special(drive);
kill_rq:
        ide_kill_rq(drive, rq);
        return ide_stopped;
}
/**
 *      ide_stall_queue -       pause an IDE device
 *      @drive: drive to stall
 *      @timeout: time to stall for (jiffies)
 *
 *      ide_stall_queue() can be used by a drive to give excess bandwidth back
 *      to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
        if (timeout > WAIT_WORSTCASE)
                timeout = WAIT_WORSTCASE;
        drive->sleep = timeout + jiffies;
        drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
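
/*
 * Usage sketch: a driver that knows its device needs a breather can hand
 * the bus back to the other device on the cable for a while, e.g.
 *
 *      ide_stall_queue(drive, WAIT_MIN_SLEEP);
 *
 * choose_drive() below will then skip this device until drive->sleep
 * expires.
 */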
#define WAKEUP(drive)   ((drive)->service_start + 2 * (drive)->service_time)
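
/*
 * WAKEUP() anticipates when a drive will next want service: a drive whose
 * last request started at service_start and took service_time jiffies is
 * expected back around service_start + 2 * service_time. E.g. with
 * service_start == 1000 and service_time == 50, WAKEUP(drive) == 1100;
 * choose_drive() prefers the drive with the earliest such deadline.
 */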
/**
 *      choose_drive    -       select a drive to service
 *      @hwgroup: hardware group to select on
 *
 *      choose_drive() selects the next drive which will be serviced.
 *      This is necessary because the IDE layer can't issue commands
 *      to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
        ide_drive_t *drive, *best;

repeat:
        best = NULL;
        drive = hwgroup->drive;

        /*
         * drive is doing pre-flush, ordered write, post-flush sequence. even
         * though that is 3 requests, it must be seen as a single transaction.
         * we must not preempt this drive until that is complete
         */
        if (blk_queue_flushing(drive->queue)) {
                /*
                 * small race where queue could get replugged during
                 * the 3-request flush cycle, just yank the plug since
                 * we want it to finish asap
                 */
                blk_remove_plug(drive->queue);
                return drive;
        }

        do {
                u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
                u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));

                if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
                    !elv_queue_empty(drive->queue)) {
                        if (best == NULL ||
                            (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
                            (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
                                if (!blk_queue_plugged(drive->queue))
                                        best = drive;
                        }
                }
        } while ((drive = drive->next) != hwgroup->drive);

        if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
            (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
            best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
                long t = (signed long)(WAKEUP(best) - jiffies);
                if (t >= WAIT_MIN_SLEEP) {
                        /*
                         * We *may* have some time to spare, but first let's see if
                         * someone can potentially benefit from our nice mood today..
                         */
                        drive = best->next;
                        do {
                                if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
                                 && time_before(jiffies - best->service_time, WAKEUP(drive))
                                 && time_before(WAKEUP(drive), jiffies + t)) {
                                        ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
                                        goto repeat;
                                }
                        } while ((drive = drive->next) != best);
                }
        }
        return best;
}
/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
        ide_drive_t     *drive;
        ide_hwif_t      *hwif;
        struct request  *rq;
        ide_startstop_t startstop;
        int             loops = 0;

        /* caller must own ide_lock */
        BUG_ON(!irqs_disabled());

        while (!hwgroup->busy) {
                hwgroup->busy = 1;
                /* for atari only */
                ide_get_lock(ide_intr, hwgroup);
                drive = choose_drive(hwgroup);
                if (drive == NULL) {
                        int sleeping = 0;
                        unsigned long sleep = 0; /* shut up, gcc */
                        hwgroup->rq = NULL;
                        drive = hwgroup->drive;
                        do {
                                if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
                                    (sleeping == 0 ||
                                     time_before(drive->sleep, sleep))) {
                                        sleeping = 1;
                                        sleep = drive->sleep;
                                }
                        } while ((drive = drive->next) != hwgroup->drive);
                        if (sleeping) {
                /*
                 * Take a short snooze, and then wake up this hwgroup again.
                 * This gives other hwgroups on the same IRQ a chance to
                 * play fairly with us, just in case there are big differences
                 * in relative throughputs.. don't want to hog the cpu too much.
                 */
                                if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
                                        sleep = jiffies + WAIT_MIN_SLEEP;
                                if (timer_pending(&hwgroup->timer))
                                        printk(KERN_CRIT "ide_set_handler: timer already active\n");
                                /* so that ide_timer_expiry knows what to do */
                                hwgroup->sleeping = 1;
                                hwgroup->req_gen_timer = hwgroup->req_gen;
                                mod_timer(&hwgroup->timer, sleep);
                                /* we purposely leave hwgroup->busy==1
                                 * while sleeping */
                        } else {
                                /* Ugly, but how can we sleep for the lock
                                 * otherwise? perhaps from tq_disk?
                                 */

                                /* for atari only */
                                ide_release_lock();
                                hwgroup->busy = 0;
                        }

                        /* no more work for this hwgroup (for now) */
                        return;
                }
        again:
                hwif = HWIF(drive);
                if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
                        /*
                         * set nIEN for previous hwif, drives in the
                         * quirk_list may not like intr setups/cleanups
                         */
                        if (drive->quirk_list != 1)
                                hwif->tp_ops->set_irq(hwif, 0);
                }
                hwgroup->hwif = hwif;
                hwgroup->drive = drive;
                drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
                drive->service_start = jiffies;

                if (blk_queue_plugged(drive->queue)) {
                        printk(KERN_ERR "ide: huh? queue was plugged!\n");
                        break;
                }

                /*
                 * we know that the queue isn't empty, but this can happen
                 * if the q->prep_rq_fn() decides to kill a request
                 */
                rq = elv_next_request(drive->queue);
                if (!rq) {
                        hwgroup->busy = 0;
                        break;
                }

                /*
                 * Sanity: don't accept a request that isn't a PM request
                 * if we are currently power managed. This is very important as
                 * blk_stop_queue() doesn't prevent the elv_next_request()
                 * above to return us whatever is in the queue. Since we call
                 * ide_do_request() ourselves, we end up taking requests while
                 * the queue is blocked...
                 *
                 * We let requests forced at head of queue with ide-preempt
                 * though. I hope that doesn't happen too much, hopefully not
                 * unless the subdriver triggers such a thing in its own PM
                 * state machine.
                 *
                 * We count how many times we loop here to make sure we service
                 * all drives in the hwgroup without looping for ever
                 */
                if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
                    blk_pm_request(rq) == 0 &&
                    (rq->cmd_flags & REQ_PREEMPT) == 0) {
                        drive = drive->next ? drive->next : hwgroup->drive;
                        if (loops++ < 4 && !blk_queue_plugged(drive->queue))
                                goto again;
                        /* We clear busy, there should be no pending ATA command at this point. */
                        hwgroup->busy = 0;
                        break;
                }

                hwgroup->rq = rq;

                /*
                 * Some systems have trouble with IDE IRQs arriving while
                 * the driver is still setting things up. So, here we disable
                 * the IRQ used by this interface while the request is being started.
                 * This may look bad at first, but pretty much the same thing
                 * happens anyway when any interrupt comes in, IDE or otherwise
                 *  -- the kernel masks the IRQ while it is being handled.
                 */
                if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
                        disable_irq_nosync(hwif->irq);
                spin_unlock(&ide_lock);
                local_irq_enable_in_hardirq();
                        /* allow other IRQs while we start this request */
                startstop = start_request(drive, rq);
                spin_lock_irq(&ide_lock);
                if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
                        enable_irq(hwif->irq);
                if (startstop == ide_stopped)
                        hwgroup->busy = 0;
        }
}
/*
 * Passes the queue's pending work on to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
        ide_drive_t *drive = q->queuedata;

        ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}
/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct request *rq;
        ide_startstop_t ret = ide_stopped;

        /*
         * end current dma transaction
         */

        if (error < 0) {
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
                (void)hwif->dma_ops->dma_end(drive);
                ret = ide_error(drive, "dma timeout error",
                                hwif->tp_ops->read_status(hwif));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
                hwif->dma_ops->dma_timeout(drive);
        }

        /*
         * disable dma for now, but remember that we did so because of
         * a timeout -- we'll reenable after we finish this next request
         * (or rather the first chunk of it) in pio.
         */
        drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
        drive->retry_pio++;
        ide_dma_off_quietly(drive);

        /*
         * un-busy drive etc (hwgroup->busy is cleared on return) and
         * make sure request is sane
         */
        rq = HWGROUP(drive)->rq;

        if (!rq)
                goto out;

        HWGROUP(drive)->rq = NULL;

        rq->errors = 0;

        if (!rq->bio)
                goto out;

        rq->sector = rq->bio->bi_sector;
        rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
        rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->buffer = bio_data(rq->bio);
out:
        return ret;
}
/**
 *      ide_timer_expiry        -       handle lack of an IDE interrupt
 *      @data: timer callback magic (hwgroup)
 *
 *      An IDE command has timed out before the expected drive return
 *      occurred. At this point we attempt to clean up the current
 *      mess. If the current handler includes an expiry handler then
 *      we invoke the expiry handler, and providing it is happy the
 *      work is done. If that fails we apply generic recovery rules
 *      invoking the handler and checking the drive DMA status. We
 *      have an excessively incestuous relationship with the DMA
 *      logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
        ide_hwgroup_t   *hwgroup = (ide_hwgroup_t *) data;
        ide_handler_t   *handler;
        ide_expiry_t    *expiry;
        unsigned long   flags;
        unsigned long   wait = -1;

        spin_lock_irqsave(&ide_lock, flags);

        if (((handler = hwgroup->handler) == NULL) ||
            (hwgroup->req_gen != hwgroup->req_gen_timer)) {
                /*
                 * Either a marginal timeout occurred
                 * (got the interrupt just as timer expired),
                 * or we were "sleeping" to give other devices a chance.
                 * Either way, we don't really want to complain about anything.
                 */
                if (hwgroup->sleeping) {
                        hwgroup->sleeping = 0;
                        hwgroup->busy = 0;
                }
        } else {
                ide_drive_t *drive = hwgroup->drive;
                if (!drive) {
                        printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
                        hwgroup->handler = NULL;
                } else {
                        ide_hwif_t *hwif;
                        ide_startstop_t startstop = ide_stopped;
                        if (!hwgroup->busy) {
                                hwgroup->busy = 1;      /* paranoia */
                                printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
                        }
                        if ((expiry = hwgroup->expiry) != NULL) {
                                /* continue */
                                if ((wait = expiry(drive)) > 0) {
                                        /* reset timer */
                                        hwgroup->timer.expires = jiffies + wait;
                                        hwgroup->req_gen_timer = hwgroup->req_gen;
                                        add_timer(&hwgroup->timer);
                                        spin_unlock_irqrestore(&ide_lock, flags);
                                        return;
                                }
                        }
                        hwgroup->handler = NULL;
                        /*
                         * We need to simulate a real interrupt when invoking
                         * the handler() function, which means we need to
                         * globally mask the specific IRQ:
                         */
                        spin_unlock(&ide_lock);
                        hwif = HWIF(drive);
                        /* disable_irq_nosync ?? */
                        disable_irq(hwif->irq);
                        /* local CPU only,
                         * as if we were handling an interrupt */
                        local_irq_disable();
                        if (hwgroup->polling) {
                                startstop = handler(drive);
                        } else if (drive_is_ready(drive)) {
                                if (drive->waiting_for_dma)
                                        hwif->dma_ops->dma_lost_irq(drive);
                                (void)ide_ack_intr(hwif);
                                printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
                                startstop = handler(drive);
                        } else {
                                if (drive->waiting_for_dma) {
                                        startstop = ide_dma_timeout_retry(drive, wait);
                                } else
                                        startstop =
                                        ide_error(drive, "irq timeout",
                                                  hwif->tp_ops->read_status(hwif));
                        }
                        drive->service_time = jiffies - drive->service_start;
                        spin_lock_irq(&ide_lock);
                        enable_irq(hwif->irq);
                        if (startstop == ide_stopped)
                                hwgroup->busy = 0;
                }
        }
        ide_do_request(hwgroup, IDE_NO_IRQ);
        spin_unlock_irqrestore(&ide_lock, flags);
}
/**
 *      unexpected_intr         -       handle an unexpected IDE interrupt
 *      @irq: interrupt line
 *      @hwgroup: hwgroup being processed
 *
 *      There's nothing really useful we can do with an unexpected interrupt,
 *      other than reading the status register (to clear it), and logging it.
 *      There should be no way that an irq can happen before we're ready for it,
 *      so we needn't worry much about losing an "important" interrupt here.
 *
 *      On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *      the drive enters "idle", "standby", or "sleep" mode, so if the status
 *      looks "good", we just ignore the interrupt completely.
 *
 *      This routine assumes __cli() is in effect when called.
 *
 *      If an unexpected interrupt happens on irq15 while we are handling irq14
 *      and if the two interfaces are "serialized" (CMD640), then it looks like
 *      we could screw up by interfering with a new request being set up for
 *      irq15.
 *
 *      In reality, this is a non-issue. The new command is not sent unless
 *      the drive is ready to accept one, in which case we know the drive is
 *      not trying to interrupt us. And ide_set_handler() is always invoked
 *      before completing the issuance of any new drive command, so we will not
 *      be accidentally invoked as a result of any valid command completion
 *      interrupt.
 *
 *      Note that we must walk the entire hwgroup here. We know which hwif
 *      is doing the current command, but we don't know which hwif burped
 *      mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
        u8 stat;
        ide_hwif_t *hwif = hwgroup->hwif;

        /*
         * handle the unexpected interrupt
         */
        do {
                if (hwif->irq == irq) {
                        stat = hwif->tp_ops->read_status(hwif);

                        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
                                /* Try to not flood the console with msgs */
                                static unsigned long last_msgtime, count;
                                ++count;
                                if (time_after(jiffies, last_msgtime + HZ)) {
                                        last_msgtime = jiffies;
                                        printk(KERN_ERR "%s%s: unexpected interrupt, "
                                                "status=0x%02x, count=%ld\n",
                                                hwif->name,
                                                (hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
                                }
                        }
                }
        } while ((hwif = hwif->next) != hwgroup->hwif);
}
/**
 *      ide_intr        -       default IDE interrupt handler
 *      @irq: interrupt number
 *      @dev_id: hwif group
 *
 *      This is the default IRQ handler for the IDE layer. You should
 *      not need to override it. If you do be aware it is subtle in
 *      places.
 *
 *      hwgroup->hwif is the interface in the group currently performing
 *      a command. hwgroup->drive is the drive and hwgroup->handler is
 *      the IRQ handler to call. As we issue a command the handlers
 *      step through multiple states, reassigning the handler to the
 *      next step in the process. Unlike a smart SCSI controller IDE
 *      expects the main processor to sequence the various transfer
 *      stages. We also manage a poll timer to catch up with most
 *      timeout situations. There are still a few where the handlers
 *      don't ever decide to give up.
 *
 *      The handler eventually returns ide_stopped to indicate the
 *      request completed. At this point we issue the next request
 *      on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
        unsigned long flags;
        ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
        ide_hwif_t *hwif;
        ide_drive_t *drive;
        ide_handler_t *handler;
        ide_startstop_t startstop;
        irqreturn_t irq_ret = IRQ_NONE;

        spin_lock_irqsave(&ide_lock, flags);
        hwif = hwgroup->hwif;

        if (!ide_ack_intr(hwif))
                goto out;

        if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
                /*
                 * Not expecting an interrupt from this drive.
                 * That means this could be:
                 *      (1) an interrupt from another PCI device
                 *      sharing the same PCI INT# as us.
                 * or   (2) a drive just entered sleep or standby mode,
                 *      and is interrupting to let us know.
                 * or   (3) a spurious interrupt of unknown origin.
                 *
                 * For PCI, we cannot tell the difference,
                 * so in that case we just ignore it and hope it goes away.
                 *
                 * FIXME: unexpected_intr should be hwif-> then we can
                 * remove all the ifdef PCI crap
                 */
#ifdef CONFIG_BLK_DEV_IDEPCI
                if (hwif->chipset != ide_pci)
#endif  /* CONFIG_BLK_DEV_IDEPCI */
                {
                        /*
                         * Probably not a shared PCI interrupt,
                         * so we can safely try to do something about it:
                         */
                        unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
                } else {
                        /*
                         * Whack the status register, just in case
                         * we have a leftover pending IRQ.
                         */
                        (void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
                }
                goto out;
        }

        drive = hwgroup->drive;
        if (!drive) {
                /*
                 * This should NEVER happen, and there isn't much
                 * we could do about it here.
                 *
                 * [Note - this can occur if the drive is hot unplugged]
                 */
                goto out_handled;
        }

        if (!drive_is_ready(drive))
                /*
                 * This happens regularly when we share a PCI IRQ with
                 * another device. Unfortunately, it can also happen
                 * with some buggy drives that trigger the IRQ before
                 * their status register is up to date. Hopefully we have
                 * enough advance overhead that the latter isn't a problem.
                 */
                goto out;

        if (!hwgroup->busy) {
                hwgroup->busy = 1;      /* paranoia */
                printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
        }
        hwgroup->handler = NULL;
        hwgroup->req_gen++;
        del_timer(&hwgroup->timer);
        spin_unlock(&ide_lock);

        if (hwif->port_ops && hwif->port_ops->clear_irq)
                hwif->port_ops->clear_irq(drive);

        if (drive->dev_flags & IDE_DFLAG_UNMASK)
                local_irq_enable_in_hardirq();

        /* service this interrupt, may set handler for next interrupt */
        startstop = handler(drive);

        spin_lock_irq(&ide_lock);
        /*
         * Note that handler() may have set things up for another
         * interrupt to occur soon, but it cannot happen until
         * we exit from this routine, because it will be the
         * same irq as is currently being serviced here, and Linux
         * won't allow another of the same (on any CPU) until we return.
         */
        drive->service_time = jiffies - drive->service_start;
        if (startstop == ide_stopped) {
                if (hwgroup->handler == NULL) { /* paranoia */
                        hwgroup->busy = 0;
                        ide_do_request(hwgroup, hwif->irq);
                } else {
                        printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
                                "on exit\n", drive->name);
                }
        }
out_handled:
        irq_ret = IRQ_HANDLED;
out:
        spin_unlock_irqrestore(&ide_lock, flags);

        return irq_ret;
}
/**
 *      ide_do_drive_cmd        -       issue IDE special command
 *      @drive: device to issue command
 *      @rq: request to issue
 *
 *      This function issues a special IDE device request
 *      onto the request queue.
 *
 *      The rq is queued at the head of the request queue, displacing
 *      the currently-being-processed request and this function
 *      returns immediately without waiting for the new rq to be
 *      completed. This is VERY DANGEROUS, and is intended for
 *      careful use by the ATAPI tape/cdrom driver code.
 */

void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
{
        ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
        unsigned long flags;

        hwgroup->rq = NULL;

        spin_lock_irqsave(&ide_lock, flags);
        __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
        blk_start_queueing(drive->queue);
        spin_unlock_irqrestore(&ide_lock, flags);
}
EXPORT_SYMBOL(ide_do_drive_cmd);
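
/*
 * Usage sketch (hypothetical ATAPI-driver caller): the caller keeps
 * ownership of rq and must arrange for its completion itself; this call
 * only jumps the queue:
 *
 *      rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
 *      rq->cmd_type = REQ_TYPE_SPECIAL;
 *      ide_do_drive_cmd(drive, rq);    // rq now heads the queue
 */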
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
        ide_hwif_t *hwif = drive->hwif;
        ide_task_t task;

        memset(&task, 0, sizeof(task));
        task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
                        IDE_TFLAG_OUT_FEATURE | tf_flags;
        task.tf.feature = dma;          /* Use PIO/DMA */
        task.tf.lbam    = bcount & 0xff;
        task.tf.lbah    = (bcount >> 8) & 0xff;

        ide_tf_dump(drive->name, &task.tf);
        hwif->tp_ops->set_irq(hwif, 1);
        SELECT_MASK(drive, 0);
        hwif->tp_ops->tf_load(drive, &task);
}
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
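
/*
 * Usage sketch (hypothetical packet-command setup; bcount and dma are the
 * caller's values, names illustrative): an ATAPI driver loads the byte
 * count and DMA bit, then sends the packet command itself:
 *
 *      ide_pktcmd_tf_load(drive, 0, bcount, dma);
 *      hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
 */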
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 buf[4] = { 0 };

        while (len > 0) {
                if (write)
                        hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
                else
                        hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
                len -= 4;
        }
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);