/*
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}
/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: completion status (0 or negative means the request failed)
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update the random number input and dequeue the request, which,
 * if it was tagged, may be out of order.
 */
int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);
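/*
 * Usage sketch (illustrative, not a call made from this file): a device
 * driver's ->do_request() handler would typically finish the sectors it
 * transferred with something like
 *
 *	ide_end_request(drive, 1, rq->nr_sectors);
 *
 * while passing uptodate == 0 fails the remaining part of the request.
 */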
/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @rq: request to complete
 * @uptodate: completion status (0 or negative means the request failed)
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue. This
 * typically occurs when we pull the request and issue a REQUEST_SENSE.
 * We must still finish the old request but we must not tamper with the
 * queue in the meantime.
 *
 * NOTE: This path does not handle barrier, but barrier is not supported
 * on this path.
 */
int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	BUG_ON(!blk_rq_started(rq));

	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
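/*
 * Usage sketch (illustrative): after pulling a failed request off the queue
 * to issue a REQUEST_SENSE, a driver would later finish the original,
 * already-dequeued request with
 *
 *	ide_end_dequeued_request(drive, failed_rq, 0, failed_rq->hard_nr_sectors);
 *
 * where failed_rq is a stand-in name for the request that was dequeued.
 */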
void ide_complete_task(ide_drive_t *drive, ide_task_t *task, u8 stat, u8 err)
{
	struct ide_taskfile *tf = &task->tf;
	struct request *rq = task->rq;

	tf->error = err;
	tf->status = stat;

	drive->hwif->tp_ops->tf_read(drive, task);

	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
		memcpy(rq->special, task, sizeof(*task));

	if (task->tf_flags & IDE_TFLAG_DYN)
		kfree(task);
}
void ide_complete_rq(ide_drive_t *drive, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
EXPORT_SYMBOL(ide_complete_rq);
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy && drv_req) || media == ide_tape)
		rq->errors = IDE_DRV_ERROR_GENERAL;

	if ((media == ide_floppy || media == ide_tape) && drv_req)
		ide_complete_rq(drive, 0);
	else
		ide_end_request(drive, 0, 0);
}
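/*
 * The helpers below build taskfiles for the "special" housekeeping commands.
 * For ATA_CMD_INIT_DEV_PARAMS the CHS geometry is packed into the taskfile:
 * sectors per track go into the count and LBA-low registers, the cylinder
 * count is split over the LBA-mid/high registers, and the head count minus
 * one is merged with the drive select bits in the device register.
 */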
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}
static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}
static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}
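/*
 * Note: only one special flag is handled per invocation; start_request()
 * keeps routing the drive back through do_special() until drive->special is
 * clear, so a geometry/recalibrate/multmode sequence is issued as several
 * back-to-back taskfile commands.
 */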
/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 * It used to do much more, but has been scaled back.
 */
static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;

	return ide_stopped;
}
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		hwif->sg_nents = 1;
	} else {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	}
}
EXPORT_SYMBOL_GPL(ide_map_sg);
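/*
 * Illustrative sketch (not part of this file): once a request has been
 * mapped, a host driver walks hwif->sg_table using the element count left
 * in hwif->sg_nents, e.g.:
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i)
 *		program_dma_entry(sg_phys(sg), sg->length);
 *
 * program_dma_entry() is a stand-in for whatever the controller needs.
 */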
void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
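/*
 * Usage sketch (illustrative): PIO transfers typically prime the
 * scatter-gather bookkeeping and then map the request:
 *
 *	ide_init_sg_cmd(drive, rq);
 *	ide_map_sg(drive, rq);
 *
 * as done by execute_drive_cmd() below for taskfile data phases.
 */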
/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_task_t *task = rq->special;

	if (task) {
		switch (task->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_complete_rq(drive, 0);

	return ide_stopped;
}
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
/**
 * start_request - start of I/O and command issuing for IDE
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the port by sleeping for timeout jiffies.
 */
void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
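/*
 * Usage sketch (illustrative): a driver that wants to back off, for example
 * after the device reports it is busy, can yield the port for a while with
 *
 *	ide_stall_queue(drive, WAIT_MIN_SLEEP);
 *
 * anything above WAIT_WORSTCASE is clamped by ide_stall_queue() itself.
 */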
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t	*drive = q->queuedata;
	ide_hwif_t	*hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = NULL;
	ide_startstop_t	startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
			if (time_before(drive->sleep, jiffies)) {
				ide_unlock_port(hwif);
				goto plug_device;
			}
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->set_irq(prev_port, 0);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}
static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}
/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwif)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwif_t	*hwif = (ide_hwif_t *)data;
	ide_drive_t	*uninitialized_var(drive);
	ide_handler_t	*handler;
	unsigned long	flags;
	unsigned long	wait = -1;
	int		plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->ack_intr)
				hwif->ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}
/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwif: port being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 */
static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}
/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif
 * @regs: unused weirdness from the kernel irq layer
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware it is subtle in
 * places.
 *
 * hwif is the interface in the group currently performing
 * a command. hwif->cur_dev is the drive and hwif->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the port and the process begins again.
 */
irqreturn_t ide_intr (int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	    sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	    and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
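/*
 * Illustrative sketch (not part of this file): a host driver that manages
 * its own IRQ registration would wire a port up roughly like
 *
 *	if (request_irq(hwif->irq, ide_intr, IRQF_SHARED, hwif->name, hwif))
 *		goto out_failure;
 *
 * passing the hwif itself as dev_id, which is what ide_intr() expects;
 * out_failure is a stand-in label for the driver's error path.
 */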
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
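/*
 * Usage sketch (illustrative): when a drive requests more data than the
 * driver has left to send, the remainder can be padded out with zero bytes:
 *
 *	ide_pad_transfer(drive, 1, bcount - done);
 *
 * and leftover incoming bytes can be drained the same way with write == 0;
 * bcount and done are stand-ins for the driver's own byte accounting.
 */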