/*
 *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2005 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 */

#ifndef __LINUX_LIBATA_H__
#define __LINUX_LIBATA_H__

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>

/*
 * compile-time options: to be removed as soon as all the drivers are
 * converted to the new debugging mechanism
 */
#undef ATA_DEBUG		/* debugging output */
#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
#undef ATA_NDEBUG		/* define to disable quick runtime checks */
#undef ATA_ENABLE_PATA		/* define to enable PATA support in some
				 * low-level drivers */
#undef ATAPI_ENABLE_DMADIR	/* enables ATAPI DMADIR bridge support */


/* note: prints function name for you */
#ifdef ATA_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef ATA_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* ATA_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* ATA_DEBUG */

#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)

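/*
 * Illustrative sketch (not taken from any in-tree driver): with ATA_DEBUG
 * defined, the usual idiom is to trace function entry/exit with DPRINTK and
 * to dump the noisier details only under ATA_VERBOSE_DEBUG, e.g.
 *
 *	DPRINTK("ENTER\n");
 *	VPRINTK("nsect %u, tag %u\n", qc->nsect, qc->tag);
 *	...
 *	DPRINTK("EXIT\n");
 *
 * Both macros compile away to nothing when the corresponding option above
 * is left undefined.
 */
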
/* NEW: debug levels */
#define HAVE_LIBATA_MSG 1

enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_MALLOC	= 0x0010,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_INTR	= 0x0040,
	ATA_MSG_ERR	= 0x0080,
};

#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)

static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
{
	if (dval < 0 || dval >= (sizeof(u32) * 8))
		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
	if (!dval)
		return 0;
	return (1 << dval) - 1;
}

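/*
 * Illustrative usage sketch (assumption, not part of the exported API):
 * a low-level driver could derive the port's msg_enable from a module
 * debug parameter and then gate its printk()s on the ata_msg_*() tests
 * above, e.g.
 *
 *	static int mydrv_msg_level = 1;	(hypothetical module parameter)
 *	...
 *	ap->msg_enable = ata_msg_init(mydrv_msg_level, ATA_MSG_DRV);
 *	...
 *	if (ata_msg_probe(ap))
 *		printk(KERN_INFO "probing port %u\n", ap->port_no);
 *
 * A level of N enables the lowest N message classes, 0 disables all
 * messages, and an out-of-range value falls back to the supplied default.
 */
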
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON		0xfafbfcfdU

/* move to PCI layer? */
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return &pdev->dev;
}

enum {
	/* various global constants */
	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
	ATA_MAX_PORTS		= 8,
	ATA_DEF_QUEUE		= 1,
	ATA_MAX_QUEUE		= 1,
	ATA_MAX_SECTORS		= 200,	/* FIXME */
	ATA_MAX_BUS		= 2,
	ATA_DEF_BUSY_WAIT	= 10000,
	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,

	ATA_SHT_EMULATED	= 1,
	ATA_SHT_CMD_PER_LUN	= 1,
	ATA_SHT_THIS_ID		= -1,
	ATA_SHT_USE_CLUSTERING	= 1,

	/* struct ata_device stuff */
	ATA_DFLAG_LBA48		= (1 << 0), /* device supports LBA48 */
	ATA_DFLAG_PIO		= (1 << 1), /* device currently in PIO mode */
	ATA_DFLAG_LBA		= (1 << 2), /* device supports LBA */
	ATA_DFLAG_CDB_INTR	= (1 << 3), /* device asserts INTRQ when ready for CDB */

	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
	ATA_DEV_ATA		= 1,	/* ATA device */
	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
	ATA_DEV_NONE		= 5,	/* no device */

	/* struct ata_port flags */
	ATA_FLAG_SLAVE_POSS	= (1 << 1), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_PORT_DISABLED	= (1 << 2), /* port is disabled, ignore it */
	ATA_FLAG_SATA		= (1 << 3),
	ATA_FLAG_NO_LEGACY	= (1 << 4), /* no legacy mode check */
	ATA_FLAG_SRST		= (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
	ATA_FLAG_MMIO		= (1 << 6), /* use MMIO, not PIO */
	ATA_FLAG_SATA_RESET	= (1 << 7), /* (obsolete) use COMRESET */
	ATA_FLAG_PIO_DMA	= (1 << 8), /* PIO cmds via DMA */
	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
					     * doesn't handle PIO interrupts */
	ATA_FLAG_DEBUGMSG	= (1 << 10),
	ATA_FLAG_NO_ATAPI	= (1 << 11), /* No ATAPI support */

	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */

	ATA_FLAG_PIO_LBA48	= (1 << 13), /* Host DMA engine is LBA28 only */
	ATA_FLAG_IRQ_MASK	= (1 << 14), /* Mask IRQ in PIO xfers */

	ATA_FLAG_FLUSH_PIO_TASK	= (1 << 15), /* Flush PIO task */
	ATA_FLAG_IN_EH		= (1 << 16), /* EH in progress */

	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi layer */
	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
	ATA_QCFLAG_EH_SCHEDULED	= (1 << 5), /* EH scheduled */

	/* various lengths of time */
	ATA_TMOUT_EDD		= 5 * HZ,	/* heuristic */
	ATA_TMOUT_PIO		= 30 * HZ,
	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
	ATA_TMOUT_DATAOUT	= 30 * HZ,
	ATA_TMOUT_DATAOUT_QUICK	= 5 * HZ,
	ATA_TMOUT_CDB		= 30 * HZ,
	ATA_TMOUT_CDB_QUICK	= 5 * HZ,
	ATA_TMOUT_INTERNAL	= 30 * HZ,
	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,

	/* ATA bus states */
	BUS_UNKNOWN		= 0,
	BUS_DMA			= 1,
	BUS_IDLE		= 2,
	BUS_NOINTR		= 3,
	BUS_NODATA		= 4,
	BUS_TIMER		= 5,
	BUS_PIO			= 6,
	BUS_EDD			= 7,
	BUS_IDENTIFY		= 8,
	BUS_PACKET		= 9,

	/* SATA port states */
	PORT_UNKNOWN		= 0,
	PORT_ENABLED		= 1,
	PORT_DISABLED		= 2,

	/* encoding various smaller bitmaps into a single
	 * unsigned long bitmap
	 */
	ATA_SHIFT_UDMA		= 0,
	ATA_SHIFT_MWDMA		= 8,
	ATA_SHIFT_PIO		= 11,

	/* size of buffer to pad xfers ending on unaligned boundaries */
	ATA_DMA_PAD_SZ		= 4,
	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

	/* Masks for port functions */
	ATA_PORT_PRIMARY	= (1 << 0),
	ATA_PORT_SECONDARY	= (1 << 1),
};
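
/*
 * Hedged example (values modelled on typical SATA drivers, not mandated
 * by this header): host_flags in struct ata_probe_ent / ata_port_info is
 * built by OR-ing the ATA_FLAG_* bits above, e.g.
 *
 *	.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
 *
 * The ATA_SHIFT_UDMA/MWDMA/PIO values describe where the per-type mode
 * masks live when they are packed into a single xfer-mode bitmap.
 */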

enum hsm_task_states {
	HSM_ST_UNKNOWN,		/* state unknown */
	HSM_ST_IDLE,		/* no command ongoing */
	HSM_ST_POLL,		/* same as HSM_ST, waits longer */
	HSM_ST_TMOUT,		/* timeout */
	HSM_ST,			/* (waiting for the device to) transfer data */
	HSM_ST_LAST,		/* (waiting for the device to) complete command */
	HSM_ST_LAST_POLL,	/* same as HSM_ST_LAST, waits longer */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting for the device to)
				   write CDB or first data block */
};

enum ata_completion_errors {
	AC_ERR_DEV		= (1 << 0), /* device reported error */
	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
	AC_ERR_MEDIA		= (1 << 3), /* media error */
	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM		= (1 << 6), /* system error */
	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
	AC_ERR_OTHER		= (1 << 8), /* unknown */
};

/* forward declarations */
struct scsi_device;
struct ata_port_operations;
struct ata_port;
struct ata_queued_cmd;

/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef void (*ata_probeinit_fn_t)(struct ata_port *);
typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);

struct ata_ioports {
	unsigned long		cmd_addr;
	unsigned long		data_addr;
	unsigned long		error_addr;
	unsigned long		feature_addr;
	unsigned long		nsect_addr;
	unsigned long		lbal_addr;
	unsigned long		lbam_addr;
	unsigned long		lbah_addr;
	unsigned long		device_addr;
	unsigned long		status_addr;
	unsigned long		command_addr;
	unsigned long		altstatus_addr;
	unsigned long		ctl_addr;
	unsigned long		bmdma_addr;
	unsigned long		scr_addr;
};

struct ata_probe_ent {
	struct list_head	node;
	struct device		*dev;
	const struct ata_port_operations *port_ops;
	struct scsi_host_template *sht;
	struct ata_ioports	port[ATA_MAX_PORTS];
	unsigned int		n_ports;
	unsigned int		hard_port_no;
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		legacy_mode;
	unsigned long		irq;
	unsigned int		irq_flags;
	unsigned long		host_flags;
	void __iomem		*mmio_base;
	void			*private_data;
};
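
/*
 * Rough registration sketch (hypothetical driver, error handling omitted):
 * a low-level driver that does not go through the PCI helpers fills a
 * probe entry and hands it to ata_device_add(), declared further below.
 * mydrv_ops and mydrv_sht are placeholders for driver-provided objects.
 *
 *	struct ata_probe_ent *probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
 *
 *	INIT_LIST_HEAD(&probe_ent->node);
 *	probe_ent->dev = dev;
 *	probe_ent->port_ops = &mydrv_ops;
 *	probe_ent->sht = &mydrv_sht;
 *	probe_ent->n_ports = 1;
 *	probe_ent->pio_mask = 0x1f;	(PIO 0-4)
 *	probe_ent->irq = irq;
 *	probe_ent->port[0].cmd_addr = cmd_base;
 *	probe_ent->port[0].ctl_addr = ctl_base;
 *	ata_std_ports(&probe_ent->port[0]);
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 *
 * ata_device_add() copies what it needs, so the probe entry itself can be
 * freed afterwards.
 */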

struct ata_host_set {
	spinlock_t		lock;
	struct device		*dev;
	unsigned long		irq;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	struct ata_port		*ports[0];
};

struct ata_queued_cmd {
	struct ata_port		*ap;
	struct ata_device	*dev;

	struct scsi_cmnd	*scsicmd;
	void			(*scsidone)(struct scsi_cmnd *);

	struct ata_taskfile	tf;
	u8			cdb[ATAPI_CDB_LEN];

	unsigned long		flags;		/* ATA_QCFLAG_xxx */
	unsigned int		tag;
	unsigned int		n_elem;
	unsigned int		orig_n_elem;

	int			dma_dir;

	unsigned int		pad_len;

	unsigned int		nsect;
	unsigned int		cursect;

	unsigned int		nbytes;
	unsigned int		curbytes;

	unsigned int		cursg;
	unsigned int		cursg_ofs;

	struct scatterlist	sgent;
	struct scatterlist	pad_sgent;
	void			*buf_virt;

	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
	struct scatterlist	*__sg;

	unsigned int		err_mask;

	ata_qc_cb_t		complete_fn;

	void			*private_data;
};

struct ata_host_stats {
	unsigned long		unhandled_irq;
	unsigned long		idle_irq;
	unsigned long		rw_reqbuf;
};

struct ata_device {
	u64			n_sectors;	/* size of device, if ATA */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	unsigned int		class;		/* ATA_DEV_xxx */
	unsigned int		devno;		/* 0 or 1 */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;
	u8			dma_mode;
	u8			xfer_mode;
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						   READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */
};

struct ata_port {
	struct Scsi_Host	*host;		/* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	unsigned long		flags;		/* ATA_FLAG_xxx */
	unsigned int		id;		/* unique id req'd by scsi midlayer */
	unsigned int		port_no;	/* unique port #; from zero */
	unsigned int		hard_port_no;	/* hardware port #; from zero */

	struct ata_prd		*prd;		/* our SG list */
	dma_addr_t		prd_dma;	/* and its DMA mapping */

	void			*pad;		/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;		/* ATA cmd/ctl/dma register blocks */

	u8			ctl;		/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;		/* cable type; ATA_CBL_xxx */

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
	unsigned long		qactive;
	unsigned int		active_tag;

	struct ata_host_stats	stats;
	struct ata_host_set	*host_set;

	struct work_struct	pio_task;
	unsigned int		hsm_task_state;
	unsigned long		pio_task_timeout;

	u32			msg_enable;
	struct list_head	eh_done_q;

	void			*private_data;
};

struct ata_port_operations {
	void (*port_disable) (struct ata_port *);

	void (*dev_config) (struct ata_port *, struct ata_device *);

	void (*set_piomode) (struct ata_port *, struct ata_device *);
	void (*set_dmamode) (struct ata_port *, struct ata_device *);

	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);

	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
	u8   (*check_status)(struct ata_port *ap);
	u8   (*check_altstatus)(struct ata_port *ap);
	void (*dev_select)(struct ata_port *ap, unsigned int device);

	void (*phy_reset) (struct ata_port *ap); /* obsolete */
	int  (*probe_reset) (struct ata_port *ap, unsigned int *classes);

	void (*post_set_mode) (struct ata_port *ap);

	int (*check_atapi_dma) (struct ata_queued_cmd *qc);

	void (*bmdma_setup) (struct ata_queued_cmd *qc);
	void (*bmdma_start) (struct ata_queued_cmd *qc);

	void (*qc_prep) (struct ata_queued_cmd *qc);
	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);

	void (*eng_timeout) (struct ata_port *ap);

	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
	void (*irq_clear) (struct ata_port *);

	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
			   u32 val);

	int (*port_start) (struct ata_port *ap);
	void (*port_stop) (struct ata_port *ap);

	void (*host_stop) (struct ata_host_set *host_set);

	void (*bmdma_stop) (struct ata_queued_cmd *qc);
	u8   (*bmdma_status) (struct ata_port *ap);
};
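
/*
 * Minimal ops sketch (hypothetical driver): a conventional BMDMA-style
 * controller can wire most hooks straight to the default implementations
 * declared further down in this header; only hardware-specific hooks need
 * driver code, e.g.
 *
 *	static const struct ata_port_operations mydrv_ops = {
 *		.port_disable	= ata_port_disable,
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.eng_timeout	= ata_eng_timeout,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *		.host_stop	= ata_host_stop,
 *	};
 *
 * phy_reset/probe_reset, set_piomode/set_dmamode and the scr_* hooks are
 * typically the pieces that remain controller-specific.
 */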

struct ata_port_info {
	struct scsi_host_template	*sht;
	unsigned long		host_flags;
	unsigned long		pio_mask;
	unsigned long		mwdma_mask;
	unsigned long		udma_mask;
	const struct ata_port_operations *port_ops;
	void			*private_data;
};
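
/*
 * Example values (hedged; modelled on typical drivers rather than taken
 * from this file): the mode masks are bitmaps of supported modes with the
 * lowest mode in bit 0, e.g.
 *
 *	static struct ata_port_info mydrv_port_info = {
 *		.sht		= &mydrv_sht,	(hypothetical)
 *		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 *		.pio_mask	= 0x1f,		(PIO 0-4)
 *		.mwdma_mask	= 0x07,		(MWDMA 0-2)
 *		.udma_mask	= 0x7f,		(UDMA 0-6)
 *		.port_ops	= &mydrv_ops,
 *	};
 *
 * An array of pointers to such structures is what ata_pci_init_one(),
 * declared below, consumes.
 */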

struct ata_timing {
	unsigned short mode;		/* ATA mode */
	unsigned short setup;		/* t1 */
	unsigned short act8b;		/* t2 for 8-bit I/O */
	unsigned short rec8b;		/* t2i for 8-bit I/O */
	unsigned short cyc8b;		/* t0 for 8-bit I/O */
	unsigned short active;		/* t2 or tD */
	unsigned short recover;		/* t2i or tK */
	unsigned short cycle;		/* t0 */
	unsigned short udma;		/* t2CYCTYP/2 */
};

#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)

extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int ata_drive_probe_reset(struct ata_port *ap,
			ata_probeinit_fn_t probeinit,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset, unsigned int *classes);
extern void ata_std_probeinit(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, int verbose,
			     unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, int verbose,
			      unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
			     unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern int ata_scsi_error(struct Scsi_Host *host);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *);
extern int ata_device_resume(struct ata_port *, struct ata_device *);
extern int ata_device_suspend(struct ata_port *, struct ata_device *);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
				   unsigned long timeout_pat,
				   unsigned long timeout);

/*
 * Default driver ops implementations
 */
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
			    unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
			unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_id_string(const u16 *id, unsigned char *s,
			  unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
			    unsigned int ofs, unsigned int len);
extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8   ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
			      struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);

/*
 * Timing helpers
 */

extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
extern void ata_timing_merge(const struct ata_timing *,
			     const struct ata_timing *, struct ata_timing *,
			     unsigned int);

enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};
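
/*
 * Usage sketch (hypothetical set_piomode hook, error handling omitted,
 * 33 MHz bus clock assumed): a PATA driver typically asks libata for the
 * nominal timings of the chosen mode and quantizes them to its own clock:
 *
 *	int T = 1000000000 / 33333333;	(clock period in ns)
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *		return;
 *	(t.setup / t.active / t.recover now hold clock counts to program)
 *
 * ata_timing_merge() with a mask such as ATA_TIMING_8BIT is used when two
 * devices on one channel have to share a single register setting.
 */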


#ifdef CONFIG_PCI
struct pci_bits {
	unsigned int		reg;	/* PCI config register to read */
	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
	unsigned long		mask;
	unsigned long		val;
};

extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);

#endif /* CONFIG_PCI */


static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	return qc->pad_len ? &qc->pad_sgent : NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))

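/*
 * Iteration sketch (hypothetical qc_prep hook): hardware S/G tables are
 * normally built by walking the (possibly pad-extended) list with the
 * macro above rather than by indexing qc->__sg directly, e.g.
 *
 *	struct scatterlist *sg;
 *
 *	ata_for_each_sg(sg, qc) {
 *		u32 addr = sg_dma_address(sg);	(assumes a 32-bit DMA mask)
 *		u32 len  = sg_dma_len(sg);
 *		... write one PRD/S-G entry; ata_sg_is_last(sg, qc) tells
 *		    the driver when to set an end-of-table flag ...
 *	}
 */
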
static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

static inline unsigned int ata_dev_present(const struct ata_device *dev)
{
	return ((dev->class == ATA_DEV_ATA) ||
		(dev->class == ATA_DEV_ATAPI));
}

static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}


/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}


/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *
 *	Waits up to max*10 microseconds for the selected bits in the port's
 *	status register to be cleared.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
			       unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_chk_status(ap);
		max--;
	} while ((status & bits) && (max > 0));

	return status;
}


/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

	if (status & (ATA_BUSY | ATA_DRQ)) {
		unsigned long l = ap->ioaddr.status_addr;
		if (ata_msg_warn(ap))
			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
			       status, l);
	}

	return status;
}

static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}

static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
						      unsigned int tag)
{
	if (likely(ata_tag_valid(tag)))
		return &ap->qcmd[tag];
	return NULL;
}
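
/*
 * Interrupt-path sketch (simplified from the pattern used by
 * ata_interrupt() in libata-core): the command currently in flight is
 * looked up via the port's active tag and only handled if it is still
 * marked active, e.g.
 *
 *	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
 *
 *	if (qc && (qc->flags & ATA_QCFLAG_ACTIVE))
 *		handled |= ata_host_intr(ap, qc);
 */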

static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = ap->ctl;
	if (device == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nsect = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->err_mask = 0;

	ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static inline void ata_qc_complete(struct ata_queued_cmd *qc)
{
	if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
		return;

	__ata_qc_complete(qc);
}

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}


/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 */

static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
		       __FUNCTION__,
		       host_stat, post_stat, status);

	return status;
}

static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
{
	return ap->ops->scr_read(ap, reg);
}

static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
{
	ap->ops->scr_write(ap, reg, val);
}

static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
				   u32 val)
{
	ap->ops->scr_write(ap, reg, val);
	(void) ap->ops->scr_read(ap, reg);
}

static inline unsigned int sata_dev_present(struct ata_port *ap)
{
	return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
}

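/*
 * Example (this mirrors the check __sata_phy_reset() performs in
 * libata-core, shown here only for illustration): after waking the phy,
 * the port is probed or disabled depending on whether a device answered:
 *
 *	if (sata_dev_present(ap))
 *		ata_port_probe(ap);
 *	else
 *		ata_port_disable(ap);
 *
 * An SStatus DET field of 0x3 means "device present and phy communication
 * established".
 */
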
static inline int ata_try_flush_cache(const struct ata_device *dev)
{
	return ata_id_wcache_enabled(dev->id) ||
	       ata_id_has_flush(dev->id) ||
	       ata_id_has_flush_ext(dev->id);
}

static inline unsigned int ac_err_mask(u8 status)
{
	if (status & ATA_BUSY)
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}
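
/*
 * Completion sketch (hypothetical interrupt handler): once the device
 * status has been read, it is folded into the command's error mask before
 * completion, e.g.
 *
 *	u8 status = ata_chk_status(ap);
 *	...
 *	qc->err_mask |= ac_err_mask(status);
 *	ata_qc_complete(qc);
 *
 * __ac_err_mask() is the variant for paths that already know something
 * went wrong and therefore must not report a zero mask.
 */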

static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
{
	ap->pad_dma = 0;
	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
				     &ap->pad_dma, GFP_KERNEL);
	return (ap->pad == NULL) ? -ENOMEM : 0;
}

static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}

#endif /* __LINUX_LIBATA_H__ */