[PATCH] libata: separate out __ata_ehi_hotplugged()
include/linux/libata.h
1 /*
2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
3 * Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * libata documentation is available via 'make {ps|pdf}docs',
22 * as Documentation/DocBook/libata.*
23 *
24 */
25
26 #ifndef __LINUX_LIBATA_H__
27 #define __LINUX_LIBATA_H__
28
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/scatterlist.h>
34 #include <asm/io.h>
35 #include <linux/ata.h>
36 #include <linux/workqueue.h>
37 #include <scsi/scsi_host.h>
38
39 /*
40 * compile-time options: to be removed as soon as all the drivers are
41 * converted to the new debugging mechanism
42 */
43 #undef ATA_DEBUG /* debugging output */
44 #undef ATA_VERBOSE_DEBUG /* yet more debugging output */
45 #undef ATA_IRQ_TRAP /* define to ack screaming irqs */
46 #undef ATA_NDEBUG /* define to disable quick runtime checks */
47 #undef ATA_ENABLE_PATA /* define to enable PATA support in some
48 * low-level drivers */
49
50
51 /* note: prints function name for you */
52 #ifdef ATA_DEBUG
53 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
54 #ifdef ATA_VERBOSE_DEBUG
55 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
56 #else
57 #define VPRINTK(fmt, args...)
58 #endif /* ATA_VERBOSE_DEBUG */
59 #else
60 #define DPRINTK(fmt, args...)
61 #define VPRINTK(fmt, args...)
62 #endif /* ATA_DEBUG */
63
64 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
65
66 /* NEW: debug levels */
67 #define HAVE_LIBATA_MSG 1
68
69 enum {
70 ATA_MSG_DRV = 0x0001,
71 ATA_MSG_INFO = 0x0002,
72 ATA_MSG_PROBE = 0x0004,
73 ATA_MSG_WARN = 0x0008,
74 ATA_MSG_MALLOC = 0x0010,
75 ATA_MSG_CTL = 0x0020,
76 ATA_MSG_INTR = 0x0040,
77 ATA_MSG_ERR = 0x0080,
78 };
79
80 #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
81 #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
82 #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
83 #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
84 #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
85 #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
86 #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
87 #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
88
89 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
90 {
91 if (dval < 0 || dval >= (sizeof(u32) * 8))
92 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
93 if (!dval)
94 return 0;
95 return (1 << dval) - 1;
96 }
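/*
 * Illustrative sketch (not part of the original header): how a low-level
 * driver might seed ap->msg_enable from a module parameter via
 * ata_msg_init() and then gate debug output with the ata_msg_*() tests
 * above.  The names mydrv_msg_enable and mydrv_init_msg are hypothetical.
 */
#if 0	/* example only */
static int mydrv_msg_enable = 3;	/* hypothetical module parameter */

static void mydrv_init_msg(struct ata_port *ap)
{
	ap->msg_enable = ata_msg_init(mydrv_msg_enable, ATA_MSG_DRV);

	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_DEBUG, "msg_enable = 0x%x\n",
				ap->msg_enable);
}
#endif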
97
98 /* defines only for the constants which don't work well as enums */
99 #define ATA_TAG_POISON 0xfafbfcfdU
100
101 /* move to PCI layer? */
102 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
103 {
104 return &pdev->dev;
105 }
106
107 enum {
108 /* various global constants */
109 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
110 ATA_MAX_PORTS = 8,
111 ATA_DEF_QUEUE = 1,
112 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
113 ATA_MAX_QUEUE = 32,
114 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
115 ATA_MAX_SECTORS = 200, /* FIXME */
116 ATA_MAX_SECTORS_LBA48 = 65535,
117 ATA_MAX_BUS = 2,
118 ATA_DEF_BUSY_WAIT = 10000,
119 ATA_SHORT_PAUSE = (HZ >> 6) + 1,
120
121 ATA_SHT_EMULATED = 1,
122 ATA_SHT_CMD_PER_LUN = 1,
123 ATA_SHT_THIS_ID = -1,
124 ATA_SHT_USE_CLUSTERING = 1,
125
126 /* struct ata_device stuff */
127 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
128 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
129 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
130 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
132
133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
134 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
135
136 ATA_DFLAG_DETACH = (1 << 16),
137 ATA_DFLAG_DETACHED = (1 << 17),
138
139 ATA_DEV_UNKNOWN = 0, /* unknown device */
140 ATA_DEV_ATA = 1, /* ATA device */
141 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
142 ATA_DEV_ATAPI = 3, /* ATAPI device */
143 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
144 ATA_DEV_NONE = 5, /* no device */
145
146 /* struct ata_port flags */
147 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
148 /* (doesn't imply presence) */
149 ATA_FLAG_SATA = (1 << 1),
150 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
151 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
152 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
153 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
154 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
155 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
156 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
157 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
158 * doesn't handle PIO interrupts */
159 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
160 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
161 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
162 * Register FIS clearing BSY */
163 ATA_FLAG_DEBUGMSG = (1 << 13),
164
165 /* The following flag belongs to ap->pflags but is kept in
166 * ap->flags because it's referenced in many LLDs and will be
167 * removed in not-too-distant future.
168 */
169 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
170
171 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
172
173 /* struct ata_port pflags */
174 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
175 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
176 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
177 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
178 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
179 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
180 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
181
182 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
183 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
184
185 /* struct ata_queued_cmd flags */
186 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to SCSI layer */
187 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
188 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
189 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
190 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
191 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
192
193 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
194 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
195 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
196
197 /* host set flags */
198 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
199
200 /* various lengths of time */
201 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
202 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
203 ATA_TMOUT_INTERNAL = 30 * HZ,
204 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
205
206 /* ATA bus states */
207 BUS_UNKNOWN = 0,
208 BUS_DMA = 1,
209 BUS_IDLE = 2,
210 BUS_NOINTR = 3,
211 BUS_NODATA = 4,
212 BUS_TIMER = 5,
213 BUS_PIO = 6,
214 BUS_EDD = 7,
215 BUS_IDENTIFY = 8,
216 BUS_PACKET = 9,
217
218 /* SATA port states */
219 PORT_UNKNOWN = 0,
220 PORT_ENABLED = 1,
221 PORT_DISABLED = 2,
222
223 /* encoding various smaller bitmaps into a single
224 * unsigned int bitmap
225 */
226 ATA_BITS_PIO = 5,
227 ATA_BITS_MWDMA = 3,
228 ATA_BITS_UDMA = 8,
229
230 ATA_SHIFT_PIO = 0,
231 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
232 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
233
234 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
235 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
236 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
237
238 /* size of buffer to pad xfers ending on unaligned boundaries */
239 ATA_DMA_PAD_SZ = 4,
240 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
241
242 /* masks for port functions */
243 ATA_PORT_PRIMARY = (1 << 0),
244 ATA_PORT_SECONDARY = (1 << 1),
245
246 /* ering size */
247 ATA_ERING_SIZE = 32,
248
249 /* desc_len for ata_eh_info and context */
250 ATA_EH_DESC_LEN = 80,
251
252 /* reset / recovery action types */
253 ATA_EH_REVALIDATE = (1 << 0),
254 ATA_EH_SOFTRESET = (1 << 1),
255 ATA_EH_HARDRESET = (1 << 2),
256
257 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
258 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
259
260 /* ata_eh_info->flags */
261 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
262 ATA_EHI_RESUME_LINK = (1 << 1), /* need to resume link */
263 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
264 ATA_EHI_QUIET = (1 << 3), /* be quiet */
265
266 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
267
268 /* max repeat if error condition is still set after ->error_handler */
269 ATA_EH_MAX_REPEAT = 5,
270
271 /* how hard we will try to probe/recover devices */
272 ATA_PROBE_MAX_TRIES = 3,
273 ATA_EH_RESET_TRIES = 3,
274 ATA_EH_DEV_TRIES = 3,
275
276 /* Drive spinup time (time from power-on to the first D2H FIS)
277 * in msecs - 8s currently. Failing to get ready in this time
278 * isn't critical. It will result in reset failure for
279 * controllers which can't wait for the first D2H FIS. libata
280 * will retry, so it just has to be long enough to spin up
281 * most devices.
282 */
283 ATA_SPINUP_WAIT = 8000,
284 };
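/*
 * Illustrative sketch (not part of the original header): packing separate
 * PIO/MWDMA/UDMA masks into the single unsigned int layout described by
 * the ATA_BITS_*/ATA_SHIFT_*/ATA_MASK_* values above.  libata-core keeps
 * its own static helpers for this; the open-coded form and the name
 * mydrv_pack_xfermask are purely for illustration.
 */
#if 0	/* example only */
static unsigned int mydrv_pack_xfermask(unsigned int pio_mask,
					unsigned int mwdma_mask,
					unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
#endif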
285
286 enum hsm_task_states {
287 HSM_ST_UNKNOWN, /* state unknown */
288 HSM_ST_IDLE, /* no command ongoing */
289 HSM_ST, /* (waiting for the device to) transfer data */
290 HSM_ST_LAST, /* (waiting for the device to) complete command */
291 HSM_ST_ERR, /* error */
292 HSM_ST_FIRST, /* (waiting for the device to)
293 write CDB or first data block */
294 };
295
296 enum ata_completion_errors {
297 AC_ERR_DEV = (1 << 0), /* device reported error */
298 AC_ERR_HSM = (1 << 1), /* host state machine violation */
299 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
300 AC_ERR_MEDIA = (1 << 3), /* media error */
301 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
302 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
303 AC_ERR_SYSTEM = (1 << 6), /* system error */
304 AC_ERR_INVALID = (1 << 7), /* invalid argument */
305 AC_ERR_OTHER = (1 << 8), /* unknown */
306 };
307
308 /* forward declarations */
309 struct scsi_device;
310 struct ata_port_operations;
311 struct ata_port;
312 struct ata_queued_cmd;
313
314 /* typedefs */
315 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
316 typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
317 typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
318 typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
319
320 struct ata_ioports {
321 unsigned long cmd_addr;
322 unsigned long data_addr;
323 unsigned long error_addr;
324 unsigned long feature_addr;
325 unsigned long nsect_addr;
326 unsigned long lbal_addr;
327 unsigned long lbam_addr;
328 unsigned long lbah_addr;
329 unsigned long device_addr;
330 unsigned long status_addr;
331 unsigned long command_addr;
332 unsigned long altstatus_addr;
333 unsigned long ctl_addr;
334 unsigned long bmdma_addr;
335 unsigned long scr_addr;
336 };
337
338 struct ata_probe_ent {
339 struct list_head node;
340 struct device *dev;
341 const struct ata_port_operations *port_ops;
342 struct scsi_host_template *sht;
343 struct ata_ioports port[ATA_MAX_PORTS];
344 unsigned int n_ports;
345 unsigned int hard_port_no;
346 unsigned int pio_mask;
347 unsigned int mwdma_mask;
348 unsigned int udma_mask;
349 unsigned int legacy_mode;
350 unsigned long irq;
351 unsigned int irq_flags;
352 unsigned long host_flags;
353 unsigned long host_set_flags;
354 void __iomem *mmio_base;
355 void *private_data;
356 };
357
358 struct ata_host_set {
359 spinlock_t lock;
360 struct device *dev;
361 unsigned long irq;
362 void __iomem *mmio_base;
363 unsigned int n_ports;
364 void *private_data;
365 const struct ata_port_operations *ops;
366 unsigned long flags;
367 int simplex_claimed; /* Keep separate in case we
368 ever need to do this locked */
369 struct ata_host_set *next; /* for legacy mode */
370 struct ata_port *ports[0];
371 };
372
373 struct ata_queued_cmd {
374 struct ata_port *ap;
375 struct ata_device *dev;
376
377 struct scsi_cmnd *scsicmd;
378 void (*scsidone)(struct scsi_cmnd *);
379
380 struct ata_taskfile tf;
381 u8 cdb[ATAPI_CDB_LEN];
382
383 unsigned long flags; /* ATA_QCFLAG_xxx */
384 unsigned int tag;
385 unsigned int n_elem;
386 unsigned int orig_n_elem;
387
388 int dma_dir;
389
390 unsigned int pad_len;
391
392 unsigned int nsect;
393 unsigned int cursect;
394
395 unsigned int nbytes;
396 unsigned int curbytes;
397
398 unsigned int cursg;
399 unsigned int cursg_ofs;
400
401 struct scatterlist sgent;
402 struct scatterlist pad_sgent;
403 void *buf_virt;
404
405 /* DO NOT iterate over __sg manually, use ata_for_each_sg() */
406 struct scatterlist *__sg;
407
408 unsigned int err_mask;
409 struct ata_taskfile result_tf;
410 ata_qc_cb_t complete_fn;
411
412 void *private_data;
413 };
414
415 struct ata_host_stats {
416 unsigned long unhandled_irq;
417 unsigned long idle_irq;
418 unsigned long rw_reqbuf;
419 };
420
421 struct ata_ering_entry {
422 int is_io;
423 unsigned int err_mask;
424 u64 timestamp;
425 };
426
427 struct ata_ering {
428 int cursor;
429 struct ata_ering_entry ring[ATA_ERING_SIZE];
430 };
431
432 struct ata_device {
433 struct ata_port *ap;
434 unsigned int devno; /* 0 or 1 */
435 unsigned long flags; /* ATA_DFLAG_xxx */
436 struct scsi_device *sdev; /* attached SCSI device */
437 /* n_sectors is used as CLEAR_OFFSET; see the comment above ATA_DEVICE_CLEAR_OFFSET */
438 u64 n_sectors; /* size of device, if ATA */
439 unsigned int class; /* ATA_DEV_xxx */
440 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
441 u8 pio_mode;
442 u8 dma_mode;
443 u8 xfer_mode;
444 unsigned int xfer_shift; /* ATA_SHIFT_xxx */
445
446 unsigned int multi_count; /* sectors count for
447 READ/WRITE MULTIPLE */
448 unsigned int max_sectors; /* per-device max sectors */
449 unsigned int cdb_len;
450
451 /* per-dev xfer mask */
452 unsigned int pio_mask;
453 unsigned int mwdma_mask;
454 unsigned int udma_mask;
455
456 /* for CHS addressing */
457 u16 cylinders; /* Number of cylinders */
458 u16 heads; /* Number of heads */
459 u16 sectors; /* Number of sectors per track */
460
461 /* error history */
462 struct ata_ering ering;
463 };
464
465 /* Offset into struct ata_device. Fields above it are maintained
466 * across device init. Fields below are zeroed.
467 */
468 #define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
469
470 struct ata_eh_info {
471 struct ata_device *dev; /* offending device */
472 u32 serror; /* SError from LLDD */
473 unsigned int err_mask; /* port-wide err_mask */
474 unsigned int action; /* ATA_EH_* action mask */
475 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
476 unsigned int flags; /* ATA_EHI_* flags */
477
478 unsigned long hotplug_timestamp;
479 unsigned int probe_mask;
480
481 char desc[ATA_EH_DESC_LEN];
482 int desc_len;
483 };
484
485 struct ata_eh_context {
486 struct ata_eh_info i;
487 int tries[ATA_MAX_DEVICES];
488 unsigned int classes[ATA_MAX_DEVICES];
489 unsigned int did_probe_mask;
490 };
491
492 struct ata_port {
493 struct Scsi_Host *host; /* our co-allocated scsi host */
494 const struct ata_port_operations *ops;
495 spinlock_t *lock;
496 unsigned long flags; /* ATA_FLAG_xxx */
497 unsigned int pflags; /* ATA_PFLAG_xxx */
498 unsigned int id; /* unique id required by SCSI midlayer */
499 unsigned int port_no; /* unique port #; from zero */
500 unsigned int hard_port_no; /* hardware port #; from zero */
501
502 struct ata_prd *prd; /* our SG list */
503 dma_addr_t prd_dma; /* and its DMA mapping */
504
505 void *pad; /* array of DMA pad buffers */
506 dma_addr_t pad_dma;
507
508 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
509
510 u8 ctl; /* cache of ATA control register */
511 u8 last_ctl; /* Cache last written value */
512 unsigned int pio_mask;
513 unsigned int mwdma_mask;
514 unsigned int udma_mask;
515 unsigned int cbl; /* cable type; ATA_CBL_xxx */
516 unsigned int hw_sata_spd_limit;
517 unsigned int sata_spd_limit; /* SATA PHY speed limit */
518
519 /* record runtime error info, protected by host_set lock */
520 struct ata_eh_info eh_info;
521 /* EH context owned by EH */
522 struct ata_eh_context eh_context;
523
524 struct ata_device device[ATA_MAX_DEVICES];
525
526 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
527 unsigned long qc_allocated;
528 unsigned int qc_active;
529
530 unsigned int active_tag;
531 u32 sactive;
532
533 struct ata_host_stats stats;
534 struct ata_host_set *host_set;
535 struct device *dev;
536
537 struct work_struct port_task;
538 struct work_struct hotplug_task;
539 struct work_struct scsi_rescan_task;
540
541 unsigned int hsm_task_state;
542
543 u32 msg_enable;
544 struct list_head eh_done_q;
545 wait_queue_head_t eh_wait_q;
546
547 void *private_data;
548
549 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
550 };
551
552 struct ata_port_operations {
553 void (*port_disable) (struct ata_port *);
554
555 void (*dev_config) (struct ata_port *, struct ata_device *);
556
557 void (*set_piomode) (struct ata_port *, struct ata_device *);
558 void (*set_dmamode) (struct ata_port *, struct ata_device *);
559 unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
560
561 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
562 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
563
564 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
565 u8 (*check_status)(struct ata_port *ap);
566 u8 (*check_altstatus)(struct ata_port *ap);
567 void (*dev_select)(struct ata_port *ap, unsigned int device);
568
569 void (*phy_reset) (struct ata_port *ap); /* obsolete */
570 void (*set_mode) (struct ata_port *ap);
571
572 void (*post_set_mode) (struct ata_port *ap);
573
574 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
575
576 void (*bmdma_setup) (struct ata_queued_cmd *qc);
577 void (*bmdma_start) (struct ata_queued_cmd *qc);
578
579 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
580
581 void (*qc_prep) (struct ata_queued_cmd *qc);
582 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
583
584 /* Error handlers. ->error_handler overrides ->eng_timeout and
585 * indicates that new-style EH is in place.
586 */
587 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
588
589 void (*freeze) (struct ata_port *ap);
590 void (*thaw) (struct ata_port *ap);
591 void (*error_handler) (struct ata_port *ap);
592 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
593
594 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
595 void (*irq_clear) (struct ata_port *);
596
597 u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
598 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
599 u32 val);
600
601 int (*port_start) (struct ata_port *ap);
602 void (*port_stop) (struct ata_port *ap);
603
604 void (*host_stop) (struct ata_host_set *host_set);
605
606 void (*bmdma_stop) (struct ata_queued_cmd *qc);
607 u8 (*bmdma_status) (struct ata_port *ap);
608 };
609
610 struct ata_port_info {
611 struct scsi_host_template *sht;
612 unsigned long host_flags;
613 unsigned long pio_mask;
614 unsigned long mwdma_mask;
615 unsigned long udma_mask;
616 const struct ata_port_operations *port_ops;
617 void *private_data;
618 };
619
620 struct ata_timing {
621 unsigned short mode; /* ATA mode */
622 unsigned short setup; /* t1 */
623 unsigned short act8b; /* t2 for 8-bit I/O */
624 unsigned short rec8b; /* t2i for 8-bit I/O */
625 unsigned short cyc8b; /* t0 for 8-bit I/O */
626 unsigned short active; /* t2 or tD */
627 unsigned short recover; /* t2i or tK */
628 unsigned short cycle; /* t0 */
629 unsigned short udma; /* t2CYCTYP/2 */
630 };
631
632 #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
633
634 extern const unsigned long sata_deb_timing_normal[];
635 extern const unsigned long sata_deb_timing_hotplug[];
636 extern const unsigned long sata_deb_timing_long[];
637
638 static inline const unsigned long *
639 sata_ehc_deb_timing(struct ata_eh_context *ehc)
640 {
641 if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
642 return sata_deb_timing_hotplug;
643 else
644 return sata_deb_timing_normal;
645 }
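/*
 * Illustrative sketch (not part of the original header): a hardreset
 * implementation picking debounce parameters with sata_ehc_deb_timing()
 * so a hotplugged link gets the longer debounce interval.  The function
 * name mydrv_hardreset is hypothetical and error handling is omitted.
 */
#if 0	/* example only */
static int mydrv_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);

	/* issue controller-specific COMRESET here, then bring the
	 * phy back online using the selected debounce parameters
	 */
	return sata_phy_resume(ap, timing);
}
#endif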
646
647 extern void ata_port_probe(struct ata_port *);
648 extern void __sata_phy_reset(struct ata_port *ap);
649 extern void sata_phy_reset(struct ata_port *ap);
650 extern void ata_bus_reset(struct ata_port *ap);
651 extern int sata_set_spd(struct ata_port *ap);
652 extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
653 extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
654 extern int ata_std_prereset(struct ata_port *ap);
655 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
656 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
657 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
658 extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
659 extern void ata_port_disable(struct ata_port *);
660 extern void ata_std_ports(struct ata_ioports *ioaddr);
661 #ifdef CONFIG_PCI
662 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
663 unsigned int n_ports);
664 extern void ata_pci_remove_one (struct pci_dev *pdev);
665 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
666 extern int ata_pci_device_resume(struct pci_dev *pdev);
667 extern int ata_pci_clear_simplex(struct pci_dev *pdev);
668 #endif /* CONFIG_PCI */
669 extern int ata_device_add(const struct ata_probe_ent *ent);
670 extern void ata_port_detach(struct ata_port *ap);
671 extern void ata_host_set_remove(struct ata_host_set *host_set);
672 extern int ata_scsi_detect(struct scsi_host_template *sht);
673 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
674 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
675 extern int ata_scsi_release(struct Scsi_Host *host);
676 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
677 extern int sata_scr_valid(struct ata_port *ap);
678 extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
679 extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
680 extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
681 extern int ata_port_online(struct ata_port *ap);
682 extern int ata_port_offline(struct ata_port *ap);
683 extern int ata_scsi_device_resume(struct scsi_device *);
684 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
685 extern int ata_device_resume(struct ata_device *);
686 extern int ata_device_suspend(struct ata_device *, pm_message_t state);
687 extern int ata_ratelimit(void);
688 extern unsigned int ata_busy_sleep(struct ata_port *ap,
689 unsigned long timeout_pat,
690 unsigned long timeout);
691 extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
692 void *data, unsigned long delay);
693 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
694 unsigned long interval_msec,
695 unsigned long timeout_msec);
696
697 /*
698 * Default driver ops implementations
699 */
700 extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
701 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
702 extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
703 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
704 extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
705 extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
706 extern u8 ata_check_status(struct ata_port *ap);
707 extern u8 ata_altstatus(struct ata_port *ap);
708 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
709 extern int ata_port_start (struct ata_port *ap);
710 extern void ata_port_stop (struct ata_port *ap);
711 extern void ata_host_stop (struct ata_host_set *host_set);
712 extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
713 extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
714 unsigned int buflen, int write_data);
715 extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
716 unsigned int buflen, int write_data);
717 extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
718 unsigned int buflen, int write_data);
719 extern void ata_qc_prep(struct ata_queued_cmd *qc);
720 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
721 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
722 extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
723 unsigned int buflen);
724 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
725 unsigned int n_elem);
726 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
727 extern void ata_id_string(const u16 *id, unsigned char *s,
728 unsigned int ofs, unsigned int len);
729 extern void ata_id_c_string(const u16 *id, unsigned char *s,
730 unsigned int ofs, unsigned int len);
731 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
732 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
733 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
734 extern u8 ata_bmdma_status(struct ata_port *ap);
735 extern void ata_bmdma_irq_clear(struct ata_port *ap);
736 extern void ata_bmdma_freeze(struct ata_port *ap);
737 extern void ata_bmdma_thaw(struct ata_port *ap);
738 extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
739 ata_reset_fn_t softreset,
740 ata_reset_fn_t hardreset,
741 ata_postreset_fn_t postreset);
742 extern void ata_bmdma_error_handler(struct ata_port *ap);
743 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
744 extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
745 u8 status, int in_wq);
746 extern void ata_qc_complete(struct ata_queued_cmd *qc);
747 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
748 void (*finish_qc)(struct ata_queued_cmd *));
749 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
750 void (*done)(struct scsi_cmnd *));
751 extern int ata_std_bios_param(struct scsi_device *sdev,
752 struct block_device *bdev,
753 sector_t capacity, int geom[]);
754 extern int ata_scsi_slave_config(struct scsi_device *sdev);
755 extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
756 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
757 int queue_depth);
758 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
759
760 /*
761 * Timing helpers
762 */
763
764 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
765 extern int ata_timing_compute(struct ata_device *, unsigned short,
766 struct ata_timing *, int, int);
767 extern void ata_timing_merge(const struct ata_timing *,
768 const struct ata_timing *, struct ata_timing *,
769 unsigned int);
770
771 enum {
772 ATA_TIMING_SETUP = (1 << 0),
773 ATA_TIMING_ACT8B = (1 << 1),
774 ATA_TIMING_REC8B = (1 << 2),
775 ATA_TIMING_CYC8B = (1 << 3),
776 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
777 ATA_TIMING_CYC8B,
778 ATA_TIMING_ACTIVE = (1 << 4),
779 ATA_TIMING_RECOVER = (1 << 5),
780 ATA_TIMING_CYCLE = (1 << 6),
781 ATA_TIMING_UDMA = (1 << 7),
782 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
783 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
784 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
785 ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
786 };
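/*
 * Illustrative sketch (not part of the original header): a PATA
 * ->set_piomode() hook using ata_timing_compute() to derive cycle
 * timings before programming controller registers.  The clock value
 * and the name mydrv_set_piomode are hypothetical; UT is unused for
 * PIO modes here.
 */
#if 0	/* example only */
static void mydrv_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* hypothetical ~33 MHz controller clock */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) == 0) {
		/* program hardware-specific registers from
		 * t.setup, t.active and t.recover here
		 */
	}
}
#endif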
787
788
789 #ifdef CONFIG_PCI
790 struct pci_bits {
791 unsigned int reg; /* PCI config register to read */
792 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
793 unsigned long mask;
794 unsigned long val;
795 };
796
797 extern void ata_pci_host_stop (struct ata_host_set *host_set);
798 extern struct ata_probe_ent *
799 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
800 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
801 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
802 #endif /* CONFIG_PCI */
803
804 /*
805 * EH
806 */
807 extern void ata_eng_timeout(struct ata_port *ap);
808
809 extern void ata_port_schedule_eh(struct ata_port *ap);
810 extern int ata_port_abort(struct ata_port *ap);
811 extern int ata_port_freeze(struct ata_port *ap);
812
813 extern void ata_eh_freeze_port(struct ata_port *ap);
814 extern void ata_eh_thaw_port(struct ata_port *ap);
815
816 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
817 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
818
819 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
820 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
821 ata_postreset_fn_t postreset);
822
823 /*
824 * printk helpers
825 */
826 #define ata_port_printk(ap, lv, fmt, args...) \
827 printk(lv"ata%u: "fmt, (ap)->id , ##args)
828
829 #define ata_dev_printk(dev, lv, fmt, args...) \
830 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
831
832 /*
833 * ata_eh_info helpers
834 */
835 #define ata_ehi_push_desc(ehi, fmt, args...) do { \
836 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
837 ATA_EH_DESC_LEN - (ehi)->desc_len, \
838 fmt , ##args); \
839 } while (0)
840
841 #define ata_ehi_clear_desc(ehi) do { \
842 (ehi)->desc[0] = '\0'; \
843 (ehi)->desc_len = 0; \
844 } while (0)
845
846 static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
847 {
848 if (ehi->flags & ATA_EHI_HOTPLUGGED)
849 return;
850
851 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
852 ehi->hotplug_timestamp = jiffies;
853
854 ehi->action |= ATA_EH_SOFTRESET;
855 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
856 }
857
858 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
859 {
860 __ata_ehi_hotplugged(ehi);
861 ehi->err_mask |= AC_ERR_ATA_BUS;
862 }
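/*
 * Illustrative sketch (not part of the original header): an interrupt
 * handler reacting to a PHY ready-change would normally use
 * ata_ehi_hotplugged(), which also records AC_ERR_ATA_BUS; callers that
 * only want hotplug probing scheduled without flagging a bus error can
 * use __ata_ehi_hotplugged() instead.  The name mydrv_phy_event is
 * hypothetical; locking is omitted.
 */
#if 0	/* example only */
static void mydrv_phy_event(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_hotplugged(ehi);		/* also sets AC_ERR_ATA_BUS */
	ata_ehi_push_desc(ehi, "%s", "PHY ready changed");

	ata_port_freeze(ap);			/* kick EH */
}
#endif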
863
864 /*
865 * qc helpers
866 */
867 static inline int
868 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
869 {
870 if (sg == &qc->pad_sgent)
871 return 1;
872 if (qc->pad_len)
873 return 0;
874 if (((sg - qc->__sg) + 1) == qc->n_elem)
875 return 1;
876 return 0;
877 }
878
879 static inline struct scatterlist *
880 ata_qc_first_sg(struct ata_queued_cmd *qc)
881 {
882 if (qc->n_elem)
883 return qc->__sg;
884 if (qc->pad_len)
885 return &qc->pad_sgent;
886 return NULL;
887 }
888
889 static inline struct scatterlist *
890 ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
891 {
892 if (sg == &qc->pad_sgent)
893 return NULL;
894 if (++sg - qc->__sg < qc->n_elem)
895 return sg;
896 if (qc->pad_len)
897 return &qc->pad_sgent;
898 return NULL;
899 }
900
901 #define ata_for_each_sg(sg, qc) \
902 for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
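/*
 * Illustrative sketch (not part of the original header): a ->qc_prep()
 * style loop walking the command's scatterlist with ata_for_each_sg(),
 * which also covers the trailing pad entry.  The S/G table filling is
 * hand-waved and the name mydrv_qc_prep is hypothetical.
 */
#if 0	/* example only */
static void mydrv_qc_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int idx = 0;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		/* fill hardware S/G entry 'idx' with addr/len here */
		idx++;
	}
}
#endif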
903
904 static inline unsigned int ata_tag_valid(unsigned int tag)
905 {
906 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
907 }
908
909 static inline unsigned int ata_tag_internal(unsigned int tag)
910 {
911 return tag == ATA_MAX_QUEUE - 1;
912 }
913
914 /*
915 * device helpers
916 */
917 static inline unsigned int ata_class_enabled(unsigned int class)
918 {
919 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
920 }
921
922 static inline unsigned int ata_class_disabled(unsigned int class)
923 {
924 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
925 }
926
927 static inline unsigned int ata_class_absent(unsigned int class)
928 {
929 return !ata_class_enabled(class) && !ata_class_disabled(class);
930 }
931
932 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
933 {
934 return ata_class_enabled(dev->class);
935 }
936
937 static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
938 {
939 return ata_class_disabled(dev->class);
940 }
941
942 static inline unsigned int ata_dev_absent(const struct ata_device *dev)
943 {
944 return ata_class_absent(dev->class);
945 }
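/*
 * Illustrative sketch (not part of the original header): the usual loop
 * over a port's device slots, skipping slots which are absent or have
 * been disabled.  The name mydrv_for_each_dev is hypothetical.
 */
#if 0	/* example only */
static void mydrv_for_each_dev(struct ata_port *ap)
{
	unsigned int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;
		/* dev is attached and usable here */
	}
}
#endif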
946
947 /*
948 * port helpers
949 */
950 static inline int ata_port_max_devices(const struct ata_port *ap)
951 {
952 if (ap->flags & ATA_FLAG_SLAVE_POSS)
953 return 2;
954 return 1;
955 }
956
957
958 static inline u8 ata_chk_status(struct ata_port *ap)
959 {
960 return ap->ops->check_status(ap);
961 }
962
963
964 /**
965 * ata_pause - Flush writes and pause 400 nanoseconds.
966 * @ap: Port to wait for.
967 *
968 * LOCKING:
969 * Inherited from caller.
970 */
971
972 static inline void ata_pause(struct ata_port *ap)
973 {
974 ata_altstatus(ap);
975 ndelay(400);
976 }
977
978
979 /**
980 * ata_busy_wait - Wait for a port status register
981 * @ap: Port to wait for.
982 *
983 * Waits up to max*10 microseconds for the selected bits in the port's
984 * status register to be cleared.
985 * Returns final value of status register.
986 *
987 * LOCKING:
988 * Inherited from caller.
989 */
990
991 static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
992 unsigned int max)
993 {
994 u8 status;
995
996 do {
997 udelay(10);
998 status = ata_chk_status(ap);
999 max--;
1000 } while ((status & bits) && (max > 0));
1001
1002 return status;
1003 }
1004
1005
1006 /**
1007 * ata_wait_idle - Wait for a port to be idle.
1008 * @ap: Port to wait for.
1009 *
1010 * Waits up to 10ms for port's BUSY and DRQ signals to clear.
1011 * Returns final value of status register.
1012 *
1013 * LOCKING:
1014 * Inherited from caller.
1015 */
1016
1017 static inline u8 ata_wait_idle(struct ata_port *ap)
1018 {
1019 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
1020
1021 if (status & (ATA_BUSY | ATA_DRQ)) {
1022 unsigned long l = ap->ioaddr.status_addr;
1023 if (ata_msg_warn(ap))
1024 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
1025 status, l);
1026 }
1027
1028 return status;
1029 }
1030
1031 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
1032 {
1033 qc->tf.ctl |= ATA_NIEN;
1034 }
1035
1036 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1037 unsigned int tag)
1038 {
1039 if (likely(ata_tag_valid(tag)))
1040 return &ap->qcmd[tag];
1041 return NULL;
1042 }
1043
1044 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1045 unsigned int tag)
1046 {
1047 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1048
1049 if (unlikely(!qc) || !ap->ops->error_handler)
1050 return qc;
1051
1052 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1053 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1054 return qc;
1055
1056 return NULL;
1057 }
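/*
 * Illustrative sketch (not part of the original header): a simplified
 * per-controller interrupt handler looking up the active command with
 * ata_qc_from_tag() and handing it to ata_host_intr(), roughly in the
 * spirit of ata_interrupt().  Only one port is handled for brevity and
 * the name mydrv_interrupt is hypothetical.
 */
#if 0	/* example only */
static irqreturn_t mydrv_interrupt(int irq, void *dev_instance,
				   struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	struct ata_port *ap = host_set->ports[0];	/* one port for brevity */
	struct ata_queued_cmd *qc;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host_set->lock, flags);

	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		handled = ata_host_intr(ap, qc);

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
#endif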
1058
1059 static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1060 {
1061 memset(tf, 0, sizeof(*tf));
1062
1063 tf->ctl = dev->ap->ctl;
1064 if (dev->devno == 0)
1065 tf->device = ATA_DEVICE_OBS;
1066 else
1067 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1068 }
1069
1070 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1071 {
1072 qc->__sg = NULL;
1073 qc->flags = 0;
1074 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
1075 qc->nsect = 0;
1076 qc->nbytes = qc->curbytes = 0;
1077 qc->err_mask = 0;
1078
1079 ata_tf_init(qc->dev, &qc->tf);
1080
1081 /* init result_tf such that it indicates normal completion */
1082 qc->result_tf.command = ATA_DRDY;
1083 qc->result_tf.feature = 0;
1084 }
1085
1086 /**
1087 * ata_irq_on - Enable interrupts on a port.
1088 * @ap: Port on which interrupts are enabled.
1089 *
1090 * Enable interrupts on a legacy IDE device using MMIO or PIO,
1091 * wait for idle, clear any pending interrupts.
1092 *
1093 * LOCKING:
1094 * Inherited from caller.
1095 */
1096
1097 static inline u8 ata_irq_on(struct ata_port *ap)
1098 {
1099 struct ata_ioports *ioaddr = &ap->ioaddr;
1100 u8 tmp;
1101
1102 ap->ctl &= ~ATA_NIEN;
1103 ap->last_ctl = ap->ctl;
1104
1105 if (ap->flags & ATA_FLAG_MMIO)
1106 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1107 else
1108 outb(ap->ctl, ioaddr->ctl_addr);
1109 tmp = ata_wait_idle(ap);
1110
1111 ap->ops->irq_clear(ap);
1112
1113 return tmp;
1114 }
1115
1116
1117 /**
1118 * ata_irq_ack - Acknowledge a device interrupt.
1119 * @ap: Port on which interrupts are enabled.
1120 *
1121 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
1122 * or BUSY+DRQ clear). Obtain dma status and port status from
1123 * device. Clear the interrupt. Return port status.
1124 *
1125 * LOCKING:
1126 */
1127
1128 static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1129 {
1130 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1131 u8 host_stat, post_stat, status;
1132
1133 status = ata_busy_wait(ap, bits, 1000);
1134 if (status & bits)
1135 if (ata_msg_err(ap))
1136 printk(KERN_ERR "abnormal status 0x%X\n", status);
1137
1138 /* get controller status; clear intr, err bits */
1139 if (ap->flags & ATA_FLAG_MMIO) {
1140 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
1141 host_stat = readb(mmio + ATA_DMA_STATUS);
1142 writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1143 mmio + ATA_DMA_STATUS);
1144
1145 post_stat = readb(mmio + ATA_DMA_STATUS);
1146 } else {
1147 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1148 outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1149 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1150
1151 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1152 }
1153
1154 if (ata_msg_intr(ap))
1155 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
1156 __FUNCTION__,
1157 host_stat, post_stat, status);
1158
1159 return status;
1160 }
1161
1162 static inline int ata_try_flush_cache(const struct ata_device *dev)
1163 {
1164 return ata_id_wcache_enabled(dev->id) ||
1165 ata_id_has_flush(dev->id) ||
1166 ata_id_has_flush_ext(dev->id);
1167 }
1168
1169 static inline unsigned int ac_err_mask(u8 status)
1170 {
1171 if (status & (ATA_BUSY | ATA_DRQ))
1172 return AC_ERR_HSM;
1173 if (status & (ATA_ERR | ATA_DF))
1174 return AC_ERR_DEV;
1175 return 0;
1176 }
1177
1178 static inline unsigned int __ac_err_mask(u8 status)
1179 {
1180 unsigned int mask = ac_err_mask(status);
1181 if (mask == 0)
1182 return AC_ERR_OTHER;
1183 return mask;
1184 }
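/*
 * Illustrative sketch (not part of the original header): a completion
 * path folding the device status register into qc->err_mask with
 * ac_err_mask() before completing the command.  The name mydrv_complete
 * is hypothetical.
 */
#if 0	/* example only */
static void mydrv_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = ata_chk_status(ap);

	qc->err_mask |= ac_err_mask(status);	/* HSM/device errors, if any */
	ata_qc_complete(qc);
}
#endif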
1185
1186 static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1187 {
1188 ap->pad_dma = 0;
1189 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1190 &ap->pad_dma, GFP_KERNEL);
1191 return (ap->pad == NULL) ? -ENOMEM : 0;
1192 }
1193
1194 static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
1195 {
1196 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
1197 }
1198
1199 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1200 {
1201 return (struct ata_port *) &host->hostdata[0];
1202 }
1203
1204 #endif /* __LINUX_LIBATA_H__ */