/*
 * Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 * Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2001-2002	Klaus Smolin
 *				IBM Storage Technology Division
 * Copyright (C) 2003-2004, 2007	Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/interrupt.h>
16 #include <linux/errno.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/hdreg.h>
20 #include <linux/ide.h>
21 #include <linux/scatterlist.h>
23 #include <asm/uaccess.h>
/*
 * Dump the main and HOB (high-order byte) taskfile registers of @tf to the
 * kernel log, prefixed with @s (typically the drive name).  Debug aid only.
 */
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif
}
40 int taskfile_lib_get_identify (ide_drive_t
*drive
, u8
*buf
)
44 memset(&cmd
, 0, sizeof(cmd
));
46 if (drive
->media
== ide_disk
)
47 cmd
.tf
.command
= ATA_CMD_ID_ATA
;
49 cmd
.tf
.command
= ATA_CMD_ID_ATAPI
;
50 cmd
.tf_flags
= IDE_TFLAG_TF
| IDE_TFLAG_DEVICE
;
51 cmd
.protocol
= ATA_PROT_PIO
;
53 return ide_raw_taskfile(drive
, &cmd
, buf
, 1);
56 static ide_startstop_t
task_no_data_intr(ide_drive_t
*);
57 static ide_startstop_t
pre_task_out_intr(ide_drive_t
*, struct ide_cmd
*);
58 static ide_startstop_t
task_pio_intr(ide_drive_t
*);
60 ide_startstop_t
do_rw_taskfile(ide_drive_t
*drive
, struct ide_cmd
*orig_cmd
)
62 ide_hwif_t
*hwif
= drive
->hwif
;
63 struct ide_cmd
*cmd
= &hwif
->cmd
;
64 struct ide_taskfile
*tf
= &cmd
->tf
;
65 ide_handler_t
*handler
= NULL
;
66 const struct ide_tp_ops
*tp_ops
= hwif
->tp_ops
;
67 const struct ide_dma_ops
*dma_ops
= hwif
->dma_ops
;
69 if (orig_cmd
->protocol
== ATA_PROT_PIO
&&
70 (orig_cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
) &&
71 drive
->mult_count
== 0) {
72 printk(KERN_ERR
"%s: multimode not set!\n", drive
->name
);
76 if (orig_cmd
->ftf_flags
& IDE_FTFLAG_FLAGGED
)
77 orig_cmd
->ftf_flags
|= IDE_FTFLAG_SET_IN_FLAGS
;
79 memcpy(cmd
, orig_cmd
, sizeof(*cmd
));
81 if ((cmd
->tf_flags
& IDE_TFLAG_DMA_PIO_FALLBACK
) == 0) {
82 ide_tf_dump(drive
->name
, tf
);
83 tp_ops
->set_irq(hwif
, 1);
84 SELECT_MASK(drive
, 0);
85 tp_ops
->tf_load(drive
, cmd
);
88 switch (cmd
->protocol
) {
90 if (cmd
->tf_flags
& IDE_TFLAG_WRITE
) {
91 tp_ops
->exec_command(hwif
, tf
->command
);
92 ndelay(400); /* FIXME */
93 return pre_task_out_intr(drive
, cmd
);
95 handler
= task_pio_intr
;
99 handler
= task_no_data_intr
;
100 ide_execute_command(drive
, tf
->command
, handler
,
101 WAIT_WORSTCASE
, NULL
);
104 if ((drive
->dev_flags
& IDE_DFLAG_USING_DMA
) == 0 ||
105 ide_build_sglist(drive
, hwif
->rq
) == 0 ||
106 dma_ops
->dma_setup(drive
))
108 dma_ops
->dma_exec_cmd(drive
, tf
->command
);
109 dma_ops
->dma_start(drive
);
113 EXPORT_SYMBOL_GPL(do_rw_taskfile
);
115 static ide_startstop_t
task_no_data_intr(ide_drive_t
*drive
)
117 ide_hwif_t
*hwif
= drive
->hwif
;
118 struct ide_cmd
*cmd
= &hwif
->cmd
;
119 struct ide_taskfile
*tf
= &cmd
->tf
;
120 int custom
= (cmd
->tf_flags
& IDE_TFLAG_CUSTOM_HANDLER
) ? 1 : 0;
121 int retries
= (custom
&& tf
->command
== ATA_CMD_INIT_DEV_PARAMS
) ? 5 : 1;
124 local_irq_enable_in_hardirq();
127 stat
= hwif
->tp_ops
->read_status(hwif
);
128 if ((stat
& ATA_BUSY
) == 0 || retries
-- == 0)
133 if (!OK_STAT(stat
, ATA_DRDY
, BAD_STAT
)) {
134 if (custom
&& tf
->command
== ATA_CMD_SET_MULTI
) {
135 drive
->mult_req
= drive
->mult_count
= 0;
136 drive
->special
.b
.recalibrate
= 1;
137 (void)ide_dump_status(drive
, __func__
, stat
);
139 } else if (custom
&& tf
->command
== ATA_CMD_INIT_DEV_PARAMS
) {
140 if ((stat
& (ATA_ERR
| ATA_DRQ
)) == 0) {
141 ide_set_handler(drive
, &task_no_data_intr
,
142 WAIT_WORSTCASE
, NULL
);
146 return ide_error(drive
, "task_no_data_intr", stat
);
149 if (custom
&& tf
->command
== ATA_CMD_SET_MULTI
)
150 drive
->mult_count
= drive
->mult_req
;
152 if (custom
== 0 || tf
->command
== ATA_CMD_IDLEIMMEDIATE
||
153 tf
->command
== ATA_CMD_CHK_POWER
) {
154 struct request
*rq
= hwif
->rq
;
156 if (blk_pm_request(rq
))
157 ide_complete_pm_rq(drive
, rq
);
159 ide_finish_cmd(drive
, cmd
, stat
);
165 static u8
wait_drive_not_busy(ide_drive_t
*drive
)
167 ide_hwif_t
*hwif
= drive
->hwif
;
172 * Last sector was transfered, wait until device is ready. This can
173 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
175 for (retries
= 0; retries
< 1000; retries
++) {
176 stat
= hwif
->tp_ops
->read_status(hwif
);
185 printk(KERN_ERR
"%s: drive still BUSY!\n", drive
->name
);
190 static void ide_pio_sector(ide_drive_t
*drive
, struct ide_cmd
*cmd
,
193 ide_hwif_t
*hwif
= drive
->hwif
;
194 struct scatterlist
*sg
= hwif
->sg_table
;
195 struct scatterlist
*cursg
= cmd
->cursg
;
197 #ifdef CONFIG_HIGHMEM
209 page
= sg_page(cursg
);
210 offset
= cursg
->offset
+ cmd
->cursg_ofs
* SECTOR_SIZE
;
212 /* get the current page and offset */
213 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
216 #ifdef CONFIG_HIGHMEM
217 local_irq_save(flags
);
219 buf
= kmap_atomic(page
, KM_BIO_SRC_IRQ
) + offset
;
224 if ((cmd
->cursg_ofs
* SECTOR_SIZE
) == cursg
->length
) {
225 cmd
->cursg
= sg_next(cmd
->cursg
);
229 /* do the actual data transfer */
231 hwif
->tp_ops
->output_data(drive
, cmd
, buf
, SECTOR_SIZE
);
233 hwif
->tp_ops
->input_data(drive
, cmd
, buf
, SECTOR_SIZE
);
235 kunmap_atomic(buf
, KM_BIO_SRC_IRQ
);
236 #ifdef CONFIG_HIGHMEM
237 local_irq_restore(flags
);
241 static void ide_pio_multi(ide_drive_t
*drive
, struct ide_cmd
*cmd
,
246 nsect
= min_t(unsigned int, cmd
->nleft
, drive
->mult_count
);
248 ide_pio_sector(drive
, cmd
, write
);
251 static void ide_pio_datablock(ide_drive_t
*drive
, struct ide_cmd
*cmd
,
254 u8 saved_io_32bit
= drive
->io_32bit
;
256 if (cmd
->tf_flags
& IDE_TFLAG_FS
)
259 if (cmd
->tf_flags
& IDE_TFLAG_IO_16BIT
)
262 touch_softlockup_watchdog();
264 if (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
)
265 ide_pio_multi(drive
, cmd
, write
);
267 ide_pio_sector(drive
, cmd
, write
);
269 drive
->io_32bit
= saved_io_32bit
;
272 static void ide_error_cmd(ide_drive_t
*drive
, struct ide_cmd
*cmd
)
274 if (cmd
->tf_flags
& IDE_TFLAG_FS
) {
275 int sectors
= cmd
->nsect
- cmd
->nleft
;
277 if (cmd
->protocol
== ATA_PROT_PIO
&&
278 ((cmd
->tf_flags
& IDE_TFLAG_WRITE
) || cmd
->nleft
== 0)) {
279 if (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
)
280 sectors
-= drive
->mult_count
;
286 ide_end_request(drive
, 1, sectors
);
290 void ide_finish_cmd(ide_drive_t
*drive
, struct ide_cmd
*cmd
, u8 stat
)
292 struct request
*rq
= drive
->hwif
->rq
;
293 u8 err
= ide_read_error(drive
);
295 ide_complete_cmd(drive
, cmd
, stat
, err
);
297 ide_complete_rq(drive
, err
? -EIO
: 0, blk_rq_bytes(rq
));
301 * Handler for command with PIO data phase.
303 static ide_startstop_t
task_pio_intr(ide_drive_t
*drive
)
305 ide_hwif_t
*hwif
= drive
->hwif
;
306 struct ide_cmd
*cmd
= &drive
->hwif
->cmd
;
307 u8 stat
= hwif
->tp_ops
->read_status(hwif
);
308 u8 write
= !!(cmd
->tf_flags
& IDE_TFLAG_WRITE
);
315 /* Didn't want any data? Odd. */
316 if ((stat
& ATA_DRQ
) == 0) {
317 /* Command all done? */
318 if (OK_STAT(stat
, ATA_DRDY
, ATA_BUSY
))
321 /* Assume it was a spurious irq */
325 if (!OK_STAT(stat
, DRIVE_READY
, drive
->bad_wstat
))
328 /* Deal with unexpected ATA data phase. */
329 if (((stat
& ATA_DRQ
) == 0) ^ (cmd
->nleft
== 0))
333 if (write
&& cmd
->nleft
== 0)
336 /* Still data left to transfer. */
337 ide_pio_datablock(drive
, cmd
, write
);
339 /* Are we done? Check status and finish transfer. */
340 if (write
== 0 && cmd
->nleft
== 0) {
341 stat
= wait_drive_not_busy(drive
);
342 if (!OK_STAT(stat
, 0, BAD_STAT
))
348 /* Still data left to transfer. */
349 ide_set_handler(drive
, &task_pio_intr
, WAIT_WORSTCASE
, NULL
);
352 if ((cmd
->tf_flags
& IDE_TFLAG_FS
) == 0)
353 ide_finish_cmd(drive
, cmd
, stat
);
355 ide_end_request(drive
, 1, cmd
->rq
->nr_sectors
);
358 ide_error_cmd(drive
, cmd
);
359 return ide_error(drive
, __func__
, stat
);
362 static ide_startstop_t
pre_task_out_intr(ide_drive_t
*drive
,
365 ide_startstop_t startstop
;
367 if (ide_wait_stat(&startstop
, drive
, ATA_DRQ
,
368 drive
->bad_wstat
, WAIT_DRQ
)) {
369 printk(KERN_ERR
"%s: no DRQ after issuing %sWRITE%s\n",
371 (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
) ? "MULT" : "",
372 (drive
->dev_flags
& IDE_DFLAG_LBA48
) ? "_EXT" : "");
376 if ((drive
->dev_flags
& IDE_DFLAG_UNMASK
) == 0)
379 ide_set_handler(drive
, &task_pio_intr
, WAIT_WORSTCASE
, NULL
);
381 ide_pio_datablock(drive
, cmd
, 1);
386 int ide_raw_taskfile(ide_drive_t
*drive
, struct ide_cmd
*cmd
, u8
*buf
,
392 rq
= blk_get_request(drive
->queue
, READ
, __GFP_WAIT
);
393 rq
->cmd_type
= REQ_TYPE_ATA_TASKFILE
;
397 * (ks) We transfer currently only whole sectors.
398 * This is suffient for now. But, it would be great,
399 * if we would find a solution to transfer any size.
400 * To support special commands like READ LONG.
402 rq
->hard_nr_sectors
= rq
->nr_sectors
= nsect
;
403 rq
->hard_cur_sectors
= rq
->current_nr_sectors
= nsect
;
405 if (cmd
->tf_flags
& IDE_TFLAG_WRITE
)
406 rq
->cmd_flags
|= REQ_RW
;
411 error
= blk_execute_rq(drive
->queue
, NULL
, rq
, 0);
417 EXPORT_SYMBOL(ide_raw_taskfile
);
419 int ide_no_data_taskfile(ide_drive_t
*drive
, struct ide_cmd
*cmd
)
421 cmd
->protocol
= ATA_PROT_NODATA
;
423 return ide_raw_taskfile(drive
, cmd
, NULL
, 0);
425 EXPORT_SYMBOL_GPL(ide_no_data_taskfile
);
427 #ifdef CONFIG_IDE_TASK_IOCTL
428 int ide_taskfile_ioctl(ide_drive_t
*drive
, unsigned long arg
)
430 ide_task_request_t
*req_task
;
436 int tasksize
= sizeof(struct ide_task_request_s
);
437 unsigned int taskin
= 0;
438 unsigned int taskout
= 0;
440 char __user
*buf
= (char __user
*)arg
;
442 // printk("IDE Taskfile ...\n");
444 req_task
= kzalloc(tasksize
, GFP_KERNEL
);
445 if (req_task
== NULL
) return -ENOMEM
;
446 if (copy_from_user(req_task
, buf
, tasksize
)) {
451 taskout
= req_task
->out_size
;
452 taskin
= req_task
->in_size
;
454 if (taskin
> 65536 || taskout
> 65536) {
460 int outtotal
= tasksize
;
461 outbuf
= kzalloc(taskout
, GFP_KERNEL
);
462 if (outbuf
== NULL
) {
466 if (copy_from_user(outbuf
, buf
+ outtotal
, taskout
)) {
473 int intotal
= tasksize
+ taskout
;
474 inbuf
= kzalloc(taskin
, GFP_KERNEL
);
479 if (copy_from_user(inbuf
, buf
+ intotal
, taskin
)) {
485 memset(&cmd
, 0, sizeof(cmd
));
487 memcpy(&cmd
.tf_array
[0], req_task
->hob_ports
,
488 HDIO_DRIVE_HOB_HDR_SIZE
- 2);
489 memcpy(&cmd
.tf_array
[6], req_task
->io_ports
,
490 HDIO_DRIVE_TASK_HDR_SIZE
);
492 cmd
.tf_flags
= IDE_TFLAG_IO_16BIT
| IDE_TFLAG_DEVICE
|
495 if (drive
->dev_flags
& IDE_DFLAG_LBA48
)
496 cmd
.tf_flags
|= (IDE_TFLAG_LBA48
| IDE_TFLAG_IN_HOB
);
498 if (req_task
->out_flags
.all
) {
499 cmd
.ftf_flags
|= IDE_FTFLAG_FLAGGED
;
501 if (req_task
->out_flags
.b
.data
)
502 cmd
.ftf_flags
|= IDE_FTFLAG_OUT_DATA
;
504 if (req_task
->out_flags
.b
.nsector_hob
)
505 cmd
.tf_flags
|= IDE_TFLAG_OUT_HOB_NSECT
;
506 if (req_task
->out_flags
.b
.sector_hob
)
507 cmd
.tf_flags
|= IDE_TFLAG_OUT_HOB_LBAL
;
508 if (req_task
->out_flags
.b
.lcyl_hob
)
509 cmd
.tf_flags
|= IDE_TFLAG_OUT_HOB_LBAM
;
510 if (req_task
->out_flags
.b
.hcyl_hob
)
511 cmd
.tf_flags
|= IDE_TFLAG_OUT_HOB_LBAH
;
513 if (req_task
->out_flags
.b
.error_feature
)
514 cmd
.tf_flags
|= IDE_TFLAG_OUT_FEATURE
;
515 if (req_task
->out_flags
.b
.nsector
)
516 cmd
.tf_flags
|= IDE_TFLAG_OUT_NSECT
;
517 if (req_task
->out_flags
.b
.sector
)
518 cmd
.tf_flags
|= IDE_TFLAG_OUT_LBAL
;
519 if (req_task
->out_flags
.b
.lcyl
)
520 cmd
.tf_flags
|= IDE_TFLAG_OUT_LBAM
;
521 if (req_task
->out_flags
.b
.hcyl
)
522 cmd
.tf_flags
|= IDE_TFLAG_OUT_LBAH
;
524 cmd
.tf_flags
|= IDE_TFLAG_OUT_TF
;
525 if (cmd
.tf_flags
& IDE_TFLAG_LBA48
)
526 cmd
.tf_flags
|= IDE_TFLAG_OUT_HOB
;
529 if (req_task
->in_flags
.b
.data
)
530 cmd
.ftf_flags
|= IDE_FTFLAG_IN_DATA
;
532 if (req_task
->req_cmd
== IDE_DRIVE_TASK_RAW_WRITE
) {
533 /* fixup data phase if needed */
534 if (req_task
->data_phase
== TASKFILE_IN_DMAQ
||
535 req_task
->data_phase
== TASKFILE_IN_DMA
)
536 cmd
.tf_flags
|= IDE_TFLAG_WRITE
;
539 cmd
.protocol
= ATA_PROT_DMA
;
541 switch (req_task
->data_phase
) {
542 case TASKFILE_MULTI_OUT
:
543 if (!drive
->mult_count
) {
544 /* (hs): give up if multcount is not set */
545 printk(KERN_ERR
"%s: %s Multimode Write " \
546 "multcount is not set\n",
547 drive
->name
, __func__
);
551 cmd
.tf_flags
|= IDE_TFLAG_MULTI_PIO
;
554 cmd
.protocol
= ATA_PROT_PIO
;
556 case TASKFILE_OUT_DMAQ
:
557 case TASKFILE_OUT_DMA
:
558 cmd
.tf_flags
|= IDE_TFLAG_WRITE
;
559 nsect
= taskout
/ SECTOR_SIZE
;
562 case TASKFILE_MULTI_IN
:
563 if (!drive
->mult_count
) {
564 /* (hs): give up if multcount is not set */
565 printk(KERN_ERR
"%s: %s Multimode Read failure " \
566 "multcount is not set\n",
567 drive
->name
, __func__
);
571 cmd
.tf_flags
|= IDE_TFLAG_MULTI_PIO
;
574 cmd
.protocol
= ATA_PROT_PIO
;
576 case TASKFILE_IN_DMAQ
:
577 case TASKFILE_IN_DMA
:
578 nsect
= taskin
/ SECTOR_SIZE
;
581 case TASKFILE_NO_DATA
:
582 cmd
.protocol
= ATA_PROT_NODATA
;
589 if (req_task
->req_cmd
== IDE_DRIVE_TASK_NO_DATA
)
592 nsect
= (cmd
.tf
.hob_nsect
<< 8) | cmd
.tf
.nsect
;
595 printk(KERN_ERR
"%s: in/out command without data\n",
602 err
= ide_raw_taskfile(drive
, &cmd
, data_buf
, nsect
);
604 memcpy(req_task
->hob_ports
, &cmd
.tf_array
[0],
605 HDIO_DRIVE_HOB_HDR_SIZE
- 2);
606 memcpy(req_task
->io_ports
, &cmd
.tf_array
[6],
607 HDIO_DRIVE_TASK_HDR_SIZE
);
609 if ((cmd
.ftf_flags
& IDE_FTFLAG_SET_IN_FLAGS
) &&
610 req_task
->in_flags
.all
== 0) {
611 req_task
->in_flags
.all
= IDE_TASKFILE_STD_IN_FLAGS
;
612 if (drive
->dev_flags
& IDE_DFLAG_LBA48
)
613 req_task
->in_flags
.all
|= (IDE_HOB_STD_IN_FLAGS
<< 8);
616 if (copy_to_user(buf
, req_task
, tasksize
)) {
621 int outtotal
= tasksize
;
622 if (copy_to_user(buf
+ outtotal
, outbuf
, taskout
)) {
628 int intotal
= tasksize
+ taskout
;
629 if (copy_to_user(buf
+ intotal
, inbuf
, taskin
)) {
639 // printk("IDE Taskfile ioctl ended. rc = %i\n", err);