| 1 | /* |
| 2 | * libata-core.c - helper library for ATA |
| 3 | * |
| 4 | * Maintained by: Jeff Garzik <jgarzik@pobox.com> |
| 5 | * Please ALWAYS copy linux-ide@vger.kernel.org |
| 6 | * on emails. |
| 7 | * |
| 8 | * Copyright 2003-2004 Red Hat, Inc. All rights reserved. |
| 9 | * Copyright 2003-2004 Jeff Garzik |
| 10 | * |
| 11 | * |
| 12 | * This program is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of the GNU General Public License as published by |
| 14 | * the Free Software Foundation; either version 2, or (at your option) |
| 15 | * any later version. |
| 16 | * |
| 17 | * This program is distributed in the hope that it will be useful, |
| 18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 20 | * GNU General Public License for more details. |
| 21 | * |
| 22 | * You should have received a copy of the GNU General Public License |
| 23 | * along with this program; see the file COPYING. If not, write to |
| 24 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. |
| 25 | * |
| 26 | * |
| 27 | * libata documentation is available via 'make {ps|pdf}docs', |
| 28 | * as Documentation/DocBook/libata.* |
| 29 | * |
| 30 | * Hardware documentation available from http://www.t13.org/ and |
| 31 | * http://www.sata-io.org/ |
| 32 | * |
| 33 | */ |
| 34 | |
| 35 | #include <linux/kernel.h> |
| 36 | #include <linux/module.h> |
| 37 | #include <linux/pci.h> |
| 38 | #include <linux/init.h> |
| 39 | #include <linux/list.h> |
| 40 | #include <linux/mm.h> |
| 41 | #include <linux/highmem.h> |
| 42 | #include <linux/spinlock.h> |
| 43 | #include <linux/blkdev.h> |
| 44 | #include <linux/delay.h> |
| 45 | #include <linux/timer.h> |
| 46 | #include <linux/interrupt.h> |
| 47 | #include <linux/completion.h> |
| 48 | #include <linux/suspend.h> |
| 49 | #include <linux/workqueue.h> |
| 50 | #include <linux/jiffies.h> |
| 51 | #include <linux/scatterlist.h> |
| 52 | #include <scsi/scsi.h> |
| 53 | #include <scsi/scsi_cmnd.h> |
| 54 | #include <scsi/scsi_host.h> |
| 55 | #include <linux/libata.h> |
| 56 | #include <asm/io.h> |
| 57 | #include <asm/semaphore.h> |
| 58 | #include <asm/byteorder.h> |
| 59 | |
| 60 | #include "libata.h" |
| 61 | |
#define DRV_VERSION	"2.10"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

/* presumably handed out to each new port; starts at 1 -- TODO confirm */
static unsigned int ata_unique_id = 1;
/* NOTE(review): appears to back ata_port_queue_task() -- confirm */
static struct workqueue_struct *ata_wq;

/* auxiliary workqueue, exported for use by other libata files */
struct workqueue_struct *ata_aux_wq;

/* module parameter: enable/disable ATAPI device discovery */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

/* module parameter: enable DMADIR handling for ATAPI SATA bridges */
int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

/* module parameter ("fua"): enable Force Unit Access support */
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

/* timeout for internal (probe) commands, in seconds */
static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| 100 | |
| 101 | |
| 102 | /** |
| 103 | * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure |
| 104 | * @tf: Taskfile to convert |
| 105 | * @fis: Buffer into which data will output |
| 106 | * @pmp: Port multiplier port |
| 107 | * |
| 108 | * Converts a standard ATA taskfile to a Serial ATA |
| 109 | * FIS structure (Register - Host to Device). |
| 110 | * |
| 111 | * LOCKING: |
| 112 | * Inherited from caller. |
| 113 | */ |
| 114 | |
| 115 | void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) |
| 116 | { |
| 117 | fis[0] = 0x27; /* Register - Host to Device FIS */ |
| 118 | fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, |
| 119 | bit 7 indicates Command FIS */ |
| 120 | fis[2] = tf->command; |
| 121 | fis[3] = tf->feature; |
| 122 | |
| 123 | fis[4] = tf->lbal; |
| 124 | fis[5] = tf->lbam; |
| 125 | fis[6] = tf->lbah; |
| 126 | fis[7] = tf->device; |
| 127 | |
| 128 | fis[8] = tf->hob_lbal; |
| 129 | fis[9] = tf->hob_lbam; |
| 130 | fis[10] = tf->hob_lbah; |
| 131 | fis[11] = tf->hob_feature; |
| 132 | |
| 133 | fis[12] = tf->nsect; |
| 134 | fis[13] = tf->hob_nsect; |
| 135 | fis[14] = 0; |
| 136 | fis[15] = tf->ctl; |
| 137 | |
| 138 | fis[16] = 0; |
| 139 | fis[17] = 0; |
| 140 | fis[18] = 0; |
| 141 | fis[19] = 0; |
| 142 | } |
| 143 | |
| 144 | /** |
| 145 | * ata_tf_from_fis - Convert SATA FIS to ATA taskfile |
| 146 | * @fis: Buffer from which data will be input |
| 147 | * @tf: Taskfile to output |
| 148 | * |
| 149 | * Converts a serial ATA FIS structure to a standard ATA taskfile. |
| 150 | * |
| 151 | * LOCKING: |
| 152 | * Inherited from caller. |
| 153 | */ |
| 154 | |
| 155 | void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) |
| 156 | { |
| 157 | tf->command = fis[2]; /* status */ |
| 158 | tf->feature = fis[3]; /* error */ |
| 159 | |
| 160 | tf->lbal = fis[4]; |
| 161 | tf->lbam = fis[5]; |
| 162 | tf->lbah = fis[6]; |
| 163 | tf->device = fis[7]; |
| 164 | |
| 165 | tf->hob_lbal = fis[8]; |
| 166 | tf->hob_lbam = fis[9]; |
| 167 | tf->hob_lbah = fis[10]; |
| 168 | |
| 169 | tf->nsect = fis[12]; |
| 170 | tf->hob_nsect = fis[13]; |
| 171 | } |
| 172 | |
/*
 * R/W command opcode table used by ata_rwcmd_protocol().  Indexed by
 * (base + fua + lba48 + write) where base is 0 (PIO multi), 8 (PIO) or
 * 16 (DMA); fua adds 4, lba48 adds 2, write adds 1.  A zero entry means
 * that combination has no defined command (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,	/* FUA write, LBA48 only */
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,				/* no FUA for plain PIO */
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT		/* FUA write, LBA48 only */
};
| 202 | |
| 203 | /** |
| 204 | * ata_rwcmd_protocol - set taskfile r/w commands and protocol |
| 205 | * @tf: command to examine and configure |
| 206 | * @dev: device tf belongs to |
| 207 | * |
| 208 | * Examine the device configuration and tf->flags to calculate |
| 209 | * the proper read/write commands and protocol to use. |
| 210 | * |
| 211 | * LOCKING: |
| 212 | * caller. |
| 213 | */ |
| 214 | static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) |
| 215 | { |
| 216 | u8 cmd; |
| 217 | |
| 218 | int index, fua, lba48, write; |
| 219 | |
| 220 | fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; |
| 221 | lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; |
| 222 | write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; |
| 223 | |
| 224 | if (dev->flags & ATA_DFLAG_PIO) { |
| 225 | tf->protocol = ATA_PROT_PIO; |
| 226 | index = dev->multi_count ? 0 : 8; |
| 227 | } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) { |
| 228 | /* Unable to use DMA due to host limitation */ |
| 229 | tf->protocol = ATA_PROT_PIO; |
| 230 | index = dev->multi_count ? 0 : 8; |
| 231 | } else { |
| 232 | tf->protocol = ATA_PROT_DMA; |
| 233 | index = 16; |
| 234 | } |
| 235 | |
| 236 | cmd = ata_rw_cmds[index + fua + lba48 + write]; |
| 237 | if (cmd) { |
| 238 | tf->command = cmd; |
| 239 | return 0; |
| 240 | } |
| 241 | return -1; |
| 242 | } |
| 243 | |
| 244 | /** |
| 245 | * ata_tf_read_block - Read block address from ATA taskfile |
| 246 | * @tf: ATA taskfile of interest |
| 247 | * @dev: ATA device @tf belongs to |
| 248 | * |
| 249 | * LOCKING: |
| 250 | * None. |
| 251 | * |
| 252 | * Read block address from @tf. This function can handle all |
| 253 | * three address formats - LBA, LBA48 and CHS. tf->protocol and |
| 254 | * flags select the address format to use. |
| 255 | * |
| 256 | * RETURNS: |
| 257 | * Block address read from @tf. |
| 258 | */ |
| 259 | u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) |
| 260 | { |
| 261 | u64 block = 0; |
| 262 | |
| 263 | if (tf->flags & ATA_TFLAG_LBA) { |
| 264 | if (tf->flags & ATA_TFLAG_LBA48) { |
| 265 | block |= (u64)tf->hob_lbah << 40; |
| 266 | block |= (u64)tf->hob_lbam << 32; |
| 267 | block |= tf->hob_lbal << 24; |
| 268 | } else |
| 269 | block |= (tf->device & 0xf) << 24; |
| 270 | |
| 271 | block |= tf->lbah << 16; |
| 272 | block |= tf->lbam << 8; |
| 273 | block |= tf->lbal; |
| 274 | } else { |
| 275 | u32 cyl, head, sect; |
| 276 | |
| 277 | cyl = tf->lbam | (tf->lbah << 8); |
| 278 | head = tf->device & 0xf; |
| 279 | sect = tf->lbal; |
| 280 | |
| 281 | block = (cyl * dev->heads + head) * dev->sectors + sect; |
| 282 | } |
| 283 | |
| 284 | return block; |
| 285 | } |
| 286 | |
| 287 | /** |
| 288 | * ata_build_rw_tf - Build ATA taskfile for given read/write request |
| 289 | * @tf: Target ATA taskfile |
| 290 | * @dev: ATA device @tf belongs to |
| 291 | * @block: Block address |
| 292 | * @n_block: Number of blocks |
| 293 | * @tf_flags: RW/FUA etc... |
| 294 | * @tag: tag |
| 295 | * |
| 296 | * LOCKING: |
| 297 | * None. |
| 298 | * |
| 299 | * Build ATA taskfile @tf for read/write request described by |
| 300 | * @block, @n_block, @tf_flags and @tag on @dev. |
| 301 | * |
| 302 | * RETURNS: |
| 303 | * |
| 304 | * 0 on success, -ERANGE if the request is too large for @dev, |
| 305 | * -EINVAL if the request is invalid. |
| 306 | */ |
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* NCQ only when the device has NCQ enabled (and not forced off or
	 * in PIO mode) and this is not the internal tag
	 */
	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, sector count goes in
		 * the feature registers instead of nsect
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* LBA bit */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 - address bits 27:24 in device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* low bytes common to LBA28 and LBA48 */
		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
| 411 | |
| 412 | /** |
| 413 | * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask |
| 414 | * @pio_mask: pio_mask |
| 415 | * @mwdma_mask: mwdma_mask |
| 416 | * @udma_mask: udma_mask |
| 417 | * |
| 418 | * Pack @pio_mask, @mwdma_mask and @udma_mask into a single |
| 419 | * unsigned int xfer_mask. |
| 420 | * |
| 421 | * LOCKING: |
| 422 | * None. |
| 423 | * |
| 424 | * RETURNS: |
| 425 | * Packed xfer_mask. |
| 426 | */ |
| 427 | static unsigned int ata_pack_xfermask(unsigned int pio_mask, |
| 428 | unsigned int mwdma_mask, |
| 429 | unsigned int udma_mask) |
| 430 | { |
| 431 | return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | |
| 432 | ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | |
| 433 | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); |
| 434 | } |
| 435 | |
| 436 | /** |
| 437 | * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks |
| 438 | * @xfer_mask: xfer_mask to unpack |
| 439 | * @pio_mask: resulting pio_mask |
| 440 | * @mwdma_mask: resulting mwdma_mask |
| 441 | * @udma_mask: resulting udma_mask |
| 442 | * |
| 443 | * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. |
 *	Any NULL destination masks will be ignored.
| 445 | */ |
| 446 | static void ata_unpack_xfermask(unsigned int xfer_mask, |
| 447 | unsigned int *pio_mask, |
| 448 | unsigned int *mwdma_mask, |
| 449 | unsigned int *udma_mask) |
| 450 | { |
| 451 | if (pio_mask) |
| 452 | *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; |
| 453 | if (mwdma_mask) |
| 454 | *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; |
| 455 | if (udma_mask) |
| 456 | *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; |
| 457 | } |
| 458 | |
/*
 * Maps each xfer_mask bit-field (at @shift, @bits wide) to its range of
 * XFER_* mode values starting at @base.  Terminated by shift == -1.
 * Used by the ata_xfer_mask2mode/mode2mask/mode2shift helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
| 468 | |
| 469 | /** |
| 470 | * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask |
| 471 | * @xfer_mask: xfer_mask of interest |
| 472 | * |
| 473 | * Return matching XFER_* value for @xfer_mask. Only the highest |
| 474 | * bit of @xfer_mask is considered. |
| 475 | * |
| 476 | * LOCKING: |
| 477 | * None. |
| 478 | * |
| 479 | * RETURNS: |
| 480 | * Matching XFER_* value, 0 if no match found. |
| 481 | */ |
| 482 | static u8 ata_xfer_mask2mode(unsigned int xfer_mask) |
| 483 | { |
| 484 | int highbit = fls(xfer_mask) - 1; |
| 485 | const struct ata_xfer_ent *ent; |
| 486 | |
| 487 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
| 488 | if (highbit >= ent->shift && highbit < ent->shift + ent->bits) |
| 489 | return ent->base + highbit - ent->shift; |
| 490 | return 0; |
| 491 | } |
| 492 | |
| 493 | /** |
| 494 | * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* |
| 495 | * @xfer_mode: XFER_* of interest |
| 496 | * |
| 497 | * Return matching xfer_mask for @xfer_mode. |
| 498 | * |
| 499 | * LOCKING: |
| 500 | * None. |
| 501 | * |
| 502 | * RETURNS: |
| 503 | * Matching xfer_mask, 0 if no match found. |
| 504 | */ |
| 505 | static unsigned int ata_xfer_mode2mask(u8 xfer_mode) |
| 506 | { |
| 507 | const struct ata_xfer_ent *ent; |
| 508 | |
| 509 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
| 510 | if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) |
| 511 | return 1 << (ent->shift + xfer_mode - ent->base); |
| 512 | return 0; |
| 513 | } |
| 514 | |
| 515 | /** |
| 516 | * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* |
| 517 | * @xfer_mode: XFER_* of interest |
| 518 | * |
| 519 | * Return matching xfer_shift for @xfer_mode. |
| 520 | * |
| 521 | * LOCKING: |
| 522 | * None. |
| 523 | * |
| 524 | * RETURNS: |
| 525 | * Matching xfer_shift, -1 if no match found. |
| 526 | */ |
| 527 | static int ata_xfer_mode2shift(unsigned int xfer_mode) |
| 528 | { |
| 529 | const struct ata_xfer_ent *ent; |
| 530 | |
| 531 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
| 532 | if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) |
| 533 | return ent->shift; |
| 534 | return -1; |
| 535 | } |
| 536 | |
| 537 | /** |
| 538 | * ata_mode_string - convert xfer_mask to string |
| 539 | * @xfer_mask: mask of bits supported; only highest bit counts. |
| 540 | * |
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
| 550 | */ |
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
| 582 | |
static const char *sata_spd_string(unsigned int spd)
{
	/* indexed by (spd - 1); spd is 1-based */
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
| 594 | |
| 595 | void ata_dev_disable(struct ata_device *dev) |
| 596 | { |
| 597 | if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) { |
| 598 | ata_dev_printk(dev, KERN_WARNING, "disabled\n"); |
| 599 | dev->class++; |
| 600 | } |
| 601 | } |
| 602 | |
| 603 | /** |
| 604 | * ata_devchk - PATA device presence detection |
| 605 | * @ap: ATA channel to examine |
| 606 | * @device: Device to examine (starting at zero) |
| 607 | * |
| 608 | * This technique was originally described in |
| 609 | * Hale Landis's ATADRVR (www.ata-atapi.com), and |
| 610 | * later found its way into the ATA/ATAPI spec. |
| 611 | * |
| 612 | * Write a pattern to the ATA shadow registers, |
| 613 | * and if a device is present, it will respond by |
| 614 | * correctly storing and echoing back the |
| 615 | * ATA shadow register contents. |
| 616 | * |
| 617 | * LOCKING: |
| 618 | * caller. |
| 619 | */ |
| 620 | |
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns to the nsect/lbal shadow registers;
	 * a present device latches the values written to it
	 */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* read back the final pattern */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
| 645 | |
| 646 | /** |
| 647 | * ata_dev_classify - determine device type based on ATA-spec signature |
| 648 | * @tf: ATA taskfile register set for device to be identified |
| 649 | * |
| 650 | * Determine from taskfile register contents whether a device is |
| 651 | * ATA or ATAPI, as per "Signature and persistence" section |
| 652 | * of ATA/PI spec (volume 1, sect 5.14). |
| 653 | * |
| 654 | * LOCKING: |
| 655 | * None. |
| 656 | * |
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
| 660 | */ |
| 661 | |
| 662 | unsigned int ata_dev_classify(const struct ata_taskfile *tf) |
| 663 | { |
| 664 | /* Apple's open source Darwin code hints that some devices only |
| 665 | * put a proper signature into the LBA mid/high registers, |
| 666 | * So, we only check those. It's sufficient for uniqueness. |
| 667 | */ |
| 668 | |
| 669 | if (((tf->lbam == 0) && (tf->lbah == 0)) || |
| 670 | ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { |
| 671 | DPRINTK("found ATA device by sig\n"); |
| 672 | return ATA_DEV_ATA; |
| 673 | } |
| 674 | |
| 675 | if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || |
| 676 | ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { |
| 677 | DPRINTK("found ATAPI device by sig\n"); |
| 678 | return ATA_DEV_ATAPI; |
| 679 | } |
| 680 | |
| 681 | DPRINTK("unknown device\n"); |
| 682 | return ATA_DEV_UNKNOWN; |
| 683 | } |
| 684 | |
| 685 | /** |
| 686 | * ata_dev_try_classify - Parse returned ATA device signature |
| 687 | * @ap: ATA channel to examine |
| 688 | * @device: Device to examine (starting at zero) |
| 689 | * @r_err: Value of error register on completion |
| 690 | * |
| 691 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, |
| 692 | * an ATA/ATAPI-defined set of values is placed in the ATA |
| 693 | * shadow registers, indicating the results of device detection |
| 694 | * and diagnostics. |
| 695 | * |
| 696 | * Select the ATA device, and read the values from the ATA shadow |
| 697 | * registers. Then parse according to the Error register value, |
| 698 | * and the spec-defined values examined by ata_dev_classify(). |
| 699 | * |
| 700 | * LOCKING: |
| 701 | * caller. |
| 702 | * |
| 703 | * RETURNS: |
| 704 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. |
| 705 | */ |
| 706 | |
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	/* read the post-reset shadow register contents */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reads back as the error register */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;	/* diag result indicates no device */

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* an ATA signature with an all-zero status register is treated as
	 * no device present
	 */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
| 743 | |
| 744 | /** |
| 745 | * ata_id_string - Convert IDENTIFY DEVICE page into string |
| 746 | * @id: IDENTIFY DEVICE results we will examine |
| 747 | * @s: string into which data is output |
| 748 | * @ofs: offset into identify device page |
| 749 | * @len: length of string to return. must be an even number. |
| 750 | * |
| 751 | * The strings in the IDENTIFY DEVICE page are broken up into |
| 752 | * 16-bit chunks. Run through the string, and output each |
| 753 | * 8-bit chunk linearly, regardless of platform. |
| 754 | * |
| 755 | * LOCKING: |
| 756 | * caller. |
| 757 | */ |
| 758 | |
| 759 | void ata_id_string(const u16 *id, unsigned char *s, |
| 760 | unsigned int ofs, unsigned int len) |
| 761 | { |
| 762 | unsigned int c; |
| 763 | |
| 764 | while (len > 0) { |
| 765 | c = id[ofs] >> 8; |
| 766 | *s = c; |
| 767 | s++; |
| 768 | |
| 769 | c = id[ofs] & 0xff; |
| 770 | *s = c; |
| 771 | s++; |
| 772 | |
| 773 | ofs++; |
| 774 | len -= 2; |
| 775 | } |
| 776 | } |
| 777 | |
| 778 | /** |
| 779 | * ata_id_c_string - Convert IDENTIFY DEVICE page into C string |
| 780 | * @id: IDENTIFY DEVICE results we will examine |
| 781 | * @s: string into which data is output |
| 782 | * @ofs: offset into identify device page |
| 783 | * @len: length of string to return. must be an odd number. |
| 784 | * |
| 785 | * This function is identical to ata_id_string except that it |
| 786 | * trims trailing spaces and terminates the resulting string with |
| 787 | * null. @len must be actual maximum length (even number) + 1. |
| 788 | * |
| 789 | * LOCKING: |
| 790 | * caller. |
| 791 | */ |
| 792 | void ata_id_c_string(const u16 *id, unsigned char *s, |
| 793 | unsigned int ofs, unsigned int len) |
| 794 | { |
| 795 | unsigned char *p; |
| 796 | |
| 797 | WARN_ON(!(len & 1)); |
| 798 | |
| 799 | ata_id_string(id, s, ofs, len - 1); |
| 800 | |
| 801 | p = s + strnlen(s, len - 1); |
| 802 | while (p > s && p[-1] == ' ') |
| 803 | p--; |
| 804 | *p = '\0'; |
| 805 | } |
| 806 | |
| 807 | static u64 ata_id_n_sectors(const u16 *id) |
| 808 | { |
| 809 | if (ata_id_has_lba(id)) { |
| 810 | if (ata_id_has_lba48(id)) |
| 811 | return ata_id_u64(id, 100); |
| 812 | else |
| 813 | return ata_id_u32(id, 60); |
| 814 | } else { |
| 815 | if (ata_id_current_chs_valid(id)) |
| 816 | return ata_id_u32(id, 57); |
| 817 | else |
| 818 | return id[1] * id[3] * id[6]; |
| 819 | } |
| 820 | } |
| 821 | |
| 822 | /** |
| 823 | * ata_noop_dev_select - Select device 0/1 on ATA bus |
| 824 | * @ap: ATA channel to manipulate |
| 825 | * @device: ATA device (numbered from zero) to select |
| 826 | * |
| 827 | * This function performs no actual function. |
| 828 | * |
| 829 | * May be used as the dev_select() entry in ata_port_operations. |
| 830 | * |
| 831 | * LOCKING: |
| 832 | * caller. |
| 833 | */ |
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty - for hosts with no device-select mechanism */
}
| 837 | |
| 838 | |
| 839 | /** |
| 840 | * ata_std_dev_select - Select device 0/1 on ATA bus |
| 841 | * @ap: ATA channel to manipulate |
| 842 | * @device: ATA device (numbered from zero) to select |
| 843 | * |
| 844 | * Use the method defined in the ATA specification to |
| 845 | * make either device 0, or device 1, active on the |
| 846 | * ATA channel. Works with both PIO and MMIO. |
| 847 | * |
| 848 | * May be used as the dev_select() entry in ata_port_operations. |
| 849 | * |
| 850 | * LOCKING: |
| 851 | * caller. |
| 852 | */ |
| 853 | |
| 854 | void ata_std_dev_select (struct ata_port *ap, unsigned int device) |
| 855 | { |
| 856 | u8 tmp; |
| 857 | |
| 858 | if (device == 0) |
| 859 | tmp = ATA_DEVICE_OBS; |
| 860 | else |
| 861 | tmp = ATA_DEVICE_OBS | ATA_DEV1; |
| 862 | |
| 863 | iowrite8(tmp, ap->ioaddr.device_addr); |
| 864 | ata_pause(ap); /* needed; also flushes, for mmio */ |
| 865 | } |
| 866 | |
| 867 | /** |
| 868 | * ata_dev_select - Select device 0/1 on ATA bus |
| 869 | * @ap: ATA channel to manipulate |
| 870 | * @device: ATA device (numbered from zero) to select |
| 871 | * @wait: non-zero to wait for Status register BSY bit to clear |
| 872 | * @can_sleep: non-zero if context allows sleeping |
| 873 | * |
| 874 | * Use the method defined in the ATA specification to |
| 875 | * make either device 0, or device 1, active on the |
| 876 | * ATA channel. |
| 877 | * |
| 878 | * This is a high-level version of ata_std_dev_select(), |
| 879 | * which additionally provides the services of inserting |
| 880 | * the proper pauses and status polling, where needed. |
| 881 | * |
| 882 | * LOCKING: |
| 883 | * caller. |
| 884 | */ |
| 885 | |
| 886 | void ata_dev_select(struct ata_port *ap, unsigned int device, |
| 887 | unsigned int wait, unsigned int can_sleep) |
| 888 | { |
| 889 | if (ata_msg_probe(ap)) |
| 890 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: " |
| 891 | "device %u, wait %u\n", ap->id, device, wait); |
| 892 | |
| 893 | if (wait) |
| 894 | ata_wait_idle(ap); |
| 895 | |
| 896 | ap->ops->dev_select(ap, device); |
| 897 | |
| 898 | if (wait) { |
| 899 | if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) |
| 900 | msleep(150); |
| 901 | ata_wait_idle(ap); |
| 902 | } |
| 903 | } |
| 904 | |
| 905 | /** |
| 906 | * ata_dump_id - IDENTIFY DEVICE info debugging output |
| 907 | * @id: IDENTIFY DEVICE page to dump |
| 908 | * |
| 909 | * Dump selected 16-bit words from the given IDENTIFY DEVICE |
| 910 | * page. |
| 911 | * |
| 912 | * LOCKING: |
| 913 | * caller. |
| 914 | */ |
| 915 | |
/* Dump the IDENTIFY words most relevant to transfer-mode and feature
 * negotiation (capabilities, validity, DMA/PIO mode words, etc.) -- debug
 * builds only; DPRINTK compiles away otherwise.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
| 943 | |
| 944 | /** |
| 945 | * ata_id_xfermask - Compute xfermask from the given IDENTIFY data |
| 946 | * @id: IDENTIFY data to compute xfer mask from |
| 947 | * |
| 948 | * Compute the xfermask for this device. This is not as trivial |
| 949 | * as it seems if we must consider early devices correctly. |
| 950 | * |
| 951 | * FIXME: pre IDE drive timing (do we care ?). |
| 952 | * |
| 953 | * LOCKING: |
| 954 | * None. |
| 955 | * |
| 956 | * RETURNS: |
| 957 | * Computed xfermask |
| 958 | */ |
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4 support in its low two bits;
		 * shift to bits 3/4 and always claim PIO0-2
		 */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;	/* all modes up to 'mode' */
		else
			pio_mask = 1;	/* out of range: claim PIO0 only */

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes (word 163):
		 * bits 2:0 = advanced PIO, bits 5:3 = advanced DMA
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);	/* PIO5 */
		if (pio > 1)
			pio_mask |= (1 << 6);	/* PIO6 */
		if (dma)
			mwdma_mask |= (1 << 3);	/* MWDMA3 */
		if (dma > 1)
			mwdma_mask |= (1 << 4);	/* MWDMA4 */
	}

	/* UDMA modes (word 88) are only valid when word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
| 1012 | |
| 1013 | /** |
| 1014 | * ata_port_queue_task - Queue port_task |
| 1015 | * @ap: The ata_port to queue port_task for |
| 1016 | * @fn: workqueue function to be scheduled |
| 1017 | * @data: data for @fn to use |
| 1018 | * @delay: delay time for workqueue function |
| 1019 | * |
| 1020 | * Schedule @fn(@data) for execution after @delay jiffies using |
| 1021 | * port_task. There is one port_task per port and it's the |
| 1022 | * user(low level driver)'s responsibility to make sure that only |
| 1023 | * one task is active at any given time. |
| 1024 | * |
| 1025 | * libata core layer takes care of synchronization between |
| 1026 | * port_task and EH. ata_port_queue_task() may be ignored for EH |
| 1027 | * synchronization. |
| 1028 | * |
| 1029 | * LOCKING: |
| 1030 | * Inherited from caller. |
| 1031 | */ |
| 1032 | void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, |
| 1033 | unsigned long delay) |
| 1034 | { |
| 1035 | int rc; |
| 1036 | |
| 1037 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) |
| 1038 | return; |
| 1039 | |
| 1040 | PREPARE_DELAYED_WORK(&ap->port_task, fn); |
| 1041 | ap->port_task_data = data; |
| 1042 | |
| 1043 | rc = queue_delayed_work(ata_wq, &ap->port_task, delay); |
| 1044 | |
| 1045 | /* rc == 0 means that another user is using port task */ |
| 1046 | WARN_ON(rc == 0); |
| 1047 | } |
| 1048 | |
| 1049 | /** |
| 1050 | * ata_port_flush_task - Flush port_task |
| 1051 | * @ap: The ata_port to flush port_task for |
| 1052 | * |
| 1053 | * After this function completes, port_task is guranteed not to |
| 1054 | * be running or scheduled. |
| 1055 | * |
| 1056 | * LOCKING: |
| 1057 | * Kernel thread context (may sleep) |
| 1058 | */ |
| 1059 | void ata_port_flush_task(struct ata_port *ap) |
| 1060 | { |
| 1061 | unsigned long flags; |
| 1062 | |
| 1063 | DPRINTK("ENTER\n"); |
| 1064 | |
| 1065 | spin_lock_irqsave(ap->lock, flags); |
| 1066 | ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK; |
| 1067 | spin_unlock_irqrestore(ap->lock, flags); |
| 1068 | |
| 1069 | DPRINTK("flush #1\n"); |
| 1070 | flush_workqueue(ata_wq); |
| 1071 | |
| 1072 | /* |
| 1073 | * At this point, if a task is running, it's guaranteed to see |
| 1074 | * the FLUSH flag; thus, it will never queue pio tasks again. |
| 1075 | * Cancel and flush. |
| 1076 | */ |
| 1077 | if (!cancel_delayed_work(&ap->port_task)) { |
| 1078 | if (ata_msg_ctl(ap)) |
| 1079 | ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", |
| 1080 | __FUNCTION__); |
| 1081 | flush_workqueue(ata_wq); |
| 1082 | } |
| 1083 | |
| 1084 | spin_lock_irqsave(ap->lock, flags); |
| 1085 | ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK; |
| 1086 | spin_unlock_irqrestore(ap->lock, flags); |
| 1087 | |
| 1088 | if (ata_msg_ctl(ap)) |
| 1089 | ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); |
| 1090 | } |
| 1091 | |
| 1092 | static void ata_qc_complete_internal(struct ata_queued_cmd *qc) |
| 1093 | { |
| 1094 | struct completion *waiting = qc->private_data; |
| 1095 | |
| 1096 | complete(waiting); |
| 1097 | } |
| 1098 | |
| 1099 | /** |
| 1100 | * ata_exec_internal_sg - execute libata internal command |
| 1101 | * @dev: Device to which the command is sent |
| 1102 | * @tf: Taskfile registers for the command and the result |
| 1103 | * @cdb: CDB for packet command |
| 1104 | * @dma_dir: Data tranfer direction of the command |
| 1105 | * @sg: sg list for the data buffer of the command |
| 1106 | * @n_elem: Number of sg entries |
| 1107 | * |
| 1108 | * Executes libata internal command with timeout. @tf contains |
| 1109 | * command on entry and result on return. Timeout and error |
| 1110 | * conditions are reported via return value. No recovery action |
| 1111 | * is taken after a command times out. It's caller's duty to |
| 1112 | * clean up after timeout. |
| 1113 | * |
| 1114 | * LOCKING: |
| 1115 | * None. Should be called with kernel context, might sleep. |
| 1116 | * |
| 1117 | * RETURNS: |
| 1118 | * Zero on success, AC_ERR_* mask on failure |
| 1119 | */ |
| 1120 | unsigned ata_exec_internal_sg(struct ata_device *dev, |
| 1121 | struct ata_taskfile *tf, const u8 *cdb, |
| 1122 | int dma_dir, struct scatterlist *sg, |
| 1123 | unsigned int n_elem) |
| 1124 | { |
| 1125 | struct ata_port *ap = dev->ap; |
| 1126 | u8 command = tf->command; |
| 1127 | struct ata_queued_cmd *qc; |
| 1128 | unsigned int tag, preempted_tag; |
| 1129 | u32 preempted_sactive, preempted_qc_active; |
| 1130 | DECLARE_COMPLETION_ONSTACK(wait); |
| 1131 | unsigned long flags; |
| 1132 | unsigned int err_mask; |
| 1133 | int rc; |
| 1134 | |
| 1135 | spin_lock_irqsave(ap->lock, flags); |
| 1136 | |
| 1137 | /* no internal command while frozen */ |
| 1138 | if (ap->pflags & ATA_PFLAG_FROZEN) { |
| 1139 | spin_unlock_irqrestore(ap->lock, flags); |
| 1140 | return AC_ERR_SYSTEM; |
| 1141 | } |
| 1142 | |
| 1143 | /* initialize internal qc */ |
| 1144 | |
| 1145 | /* XXX: Tag 0 is used for drivers with legacy EH as some |
| 1146 | * drivers choke if any other tag is given. This breaks |
| 1147 | * ata_tag_internal() test for those drivers. Don't use new |
| 1148 | * EH stuff without converting to it. |
| 1149 | */ |
| 1150 | if (ap->ops->error_handler) |
| 1151 | tag = ATA_TAG_INTERNAL; |
| 1152 | else |
| 1153 | tag = 0; |
| 1154 | |
| 1155 | if (test_and_set_bit(tag, &ap->qc_allocated)) |
| 1156 | BUG(); |
| 1157 | qc = __ata_qc_from_tag(ap, tag); |
| 1158 | |
| 1159 | qc->tag = tag; |
| 1160 | qc->scsicmd = NULL; |
| 1161 | qc->ap = ap; |
| 1162 | qc->dev = dev; |
| 1163 | ata_qc_reinit(qc); |
| 1164 | |
| 1165 | preempted_tag = ap->active_tag; |
| 1166 | preempted_sactive = ap->sactive; |
| 1167 | preempted_qc_active = ap->qc_active; |
| 1168 | ap->active_tag = ATA_TAG_POISON; |
| 1169 | ap->sactive = 0; |
| 1170 | ap->qc_active = 0; |
| 1171 | |
| 1172 | /* prepare & issue qc */ |
| 1173 | qc->tf = *tf; |
| 1174 | if (cdb) |
| 1175 | memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); |
| 1176 | qc->flags |= ATA_QCFLAG_RESULT_TF; |
| 1177 | qc->dma_dir = dma_dir; |
| 1178 | if (dma_dir != DMA_NONE) { |
| 1179 | unsigned int i, buflen = 0; |
| 1180 | |
| 1181 | for (i = 0; i < n_elem; i++) |
| 1182 | buflen += sg[i].length; |
| 1183 | |
| 1184 | ata_sg_init(qc, sg, n_elem); |
| 1185 | qc->nbytes = buflen; |
| 1186 | } |
| 1187 | |
| 1188 | qc->private_data = &wait; |
| 1189 | qc->complete_fn = ata_qc_complete_internal; |
| 1190 | |
| 1191 | ata_qc_issue(qc); |
| 1192 | |
| 1193 | spin_unlock_irqrestore(ap->lock, flags); |
| 1194 | |
| 1195 | rc = wait_for_completion_timeout(&wait, ata_probe_timeout); |
| 1196 | |
| 1197 | ata_port_flush_task(ap); |
| 1198 | |
| 1199 | if (!rc) { |
| 1200 | spin_lock_irqsave(ap->lock, flags); |
| 1201 | |
| 1202 | /* We're racing with irq here. If we lose, the |
| 1203 | * following test prevents us from completing the qc |
| 1204 | * twice. If we win, the port is frozen and will be |
| 1205 | * cleaned up by ->post_internal_cmd(). |
| 1206 | */ |
| 1207 | if (qc->flags & ATA_QCFLAG_ACTIVE) { |
| 1208 | qc->err_mask |= AC_ERR_TIMEOUT; |
| 1209 | |
| 1210 | if (ap->ops->error_handler) |
| 1211 | ata_port_freeze(ap); |
| 1212 | else |
| 1213 | ata_qc_complete(qc); |
| 1214 | |
| 1215 | if (ata_msg_warn(ap)) |
| 1216 | ata_dev_printk(dev, KERN_WARNING, |
| 1217 | "qc timeout (cmd 0x%x)\n", command); |
| 1218 | } |
| 1219 | |
| 1220 | spin_unlock_irqrestore(ap->lock, flags); |
| 1221 | } |
| 1222 | |
| 1223 | /* do post_internal_cmd */ |
| 1224 | if (ap->ops->post_internal_cmd) |
| 1225 | ap->ops->post_internal_cmd(qc); |
| 1226 | |
| 1227 | if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) { |
| 1228 | if (ata_msg_warn(ap)) |
| 1229 | ata_dev_printk(dev, KERN_WARNING, |
| 1230 | "zero err_mask for failed " |
| 1231 | "internal command, assuming AC_ERR_OTHER\n"); |
| 1232 | qc->err_mask |= AC_ERR_OTHER; |
| 1233 | } |
| 1234 | |
| 1235 | /* finish up */ |
| 1236 | spin_lock_irqsave(ap->lock, flags); |
| 1237 | |
| 1238 | *tf = qc->result_tf; |
| 1239 | err_mask = qc->err_mask; |
| 1240 | |
| 1241 | ata_qc_free(qc); |
| 1242 | ap->active_tag = preempted_tag; |
| 1243 | ap->sactive = preempted_sactive; |
| 1244 | ap->qc_active = preempted_qc_active; |
| 1245 | |
| 1246 | /* XXX - Some LLDDs (sata_mv) disable port on command failure. |
| 1247 | * Until those drivers are fixed, we detect the condition |
| 1248 | * here, fail the command with AC_ERR_SYSTEM and reenable the |
| 1249 | * port. |
| 1250 | * |
| 1251 | * Note that this doesn't change any behavior as internal |
| 1252 | * command failure results in disabling the device in the |
| 1253 | * higher layer for LLDDs without new reset/EH callbacks. |
| 1254 | * |
| 1255 | * Kill the following code as soon as those drivers are fixed. |
| 1256 | */ |
| 1257 | if (ap->flags & ATA_FLAG_DISABLED) { |
| 1258 | err_mask |= AC_ERR_SYSTEM; |
| 1259 | ata_port_probe(ap); |
| 1260 | } |
| 1261 | |
| 1262 | spin_unlock_irqrestore(ap->lock, flags); |
| 1263 | |
| 1264 | return err_mask; |
| 1265 | } |
| 1266 | |
| 1267 | /** |
| 1268 | * ata_exec_internal - execute libata internal command |
| 1269 | * @dev: Device to which the command is sent |
| 1270 | * @tf: Taskfile registers for the command and the result |
| 1271 | * @cdb: CDB for packet command |
| 1272 | * @dma_dir: Data tranfer direction of the command |
| 1273 | * @buf: Data buffer of the command |
| 1274 | * @buflen: Length of data buffer |
| 1275 | * |
| 1276 | * Wrapper around ata_exec_internal_sg() which takes simple |
| 1277 | * buffer instead of sg list. |
| 1278 | * |
| 1279 | * LOCKING: |
| 1280 | * None. Should be called with kernel context, might sleep. |
| 1281 | * |
| 1282 | * RETURNS: |
| 1283 | * Zero on success, AC_ERR_* mask on failure |
| 1284 | */ |
| 1285 | unsigned ata_exec_internal(struct ata_device *dev, |
| 1286 | struct ata_taskfile *tf, const u8 *cdb, |
| 1287 | int dma_dir, void *buf, unsigned int buflen) |
| 1288 | { |
| 1289 | struct scatterlist *psg = NULL, sg; |
| 1290 | unsigned int n_elem = 0; |
| 1291 | |
| 1292 | if (dma_dir != DMA_NONE) { |
| 1293 | WARN_ON(!buf); |
| 1294 | sg_init_one(&sg, buf, buflen); |
| 1295 | psg = &sg; |
| 1296 | n_elem++; |
| 1297 | } |
| 1298 | |
| 1299 | return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem); |
| 1300 | } |
| 1301 | |
| 1302 | /** |
| 1303 | * ata_do_simple_cmd - execute simple internal command |
| 1304 | * @dev: Device to which the command is sent |
| 1305 | * @cmd: Opcode to execute |
| 1306 | * |
| 1307 | * Execute a 'simple' command, that only consists of the opcode |
| 1308 | * 'cmd' itself, without filling any other registers |
| 1309 | * |
| 1310 | * LOCKING: |
| 1311 | * Kernel thread context (may sleep). |
| 1312 | * |
| 1313 | * RETURNS: |
| 1314 | * Zero on success, AC_ERR_* mask on failure |
| 1315 | */ |
| 1316 | unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) |
| 1317 | { |
| 1318 | struct ata_taskfile tf; |
| 1319 | |
| 1320 | ata_tf_init(dev, &tf); |
| 1321 | |
| 1322 | tf.command = cmd; |
| 1323 | tf.flags |= ATA_TFLAG_DEVICE; |
| 1324 | tf.protocol = ATA_PROT_NODATA; |
| 1325 | |
| 1326 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0); |
| 1327 | } |
| 1328 | |
| 1329 | /** |
| 1330 | * ata_pio_need_iordy - check if iordy needed |
| 1331 | * @adev: ATA device |
| 1332 | * |
| 1333 | * Check if the current speed of the device requires IORDY. Used |
| 1334 | * by various controllers for chip configuration. |
| 1335 | */ |
| 1336 | |
| 1337 | unsigned int ata_pio_need_iordy(const struct ata_device *adev) |
| 1338 | { |
| 1339 | int pio; |
| 1340 | int speed = adev->pio_mode - XFER_PIO_0; |
| 1341 | |
| 1342 | if (speed < 2) |
| 1343 | return 0; |
| 1344 | if (speed > 2) |
| 1345 | return 1; |
| 1346 | |
| 1347 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ |
| 1348 | |
| 1349 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ |
| 1350 | pio = adev->id[ATA_ID_EIDE_PIO]; |
| 1351 | /* Is the speed faster than the drive allows non IORDY ? */ |
| 1352 | if (pio) { |
| 1353 | /* This is cycle times not frequency - watch the logic! */ |
| 1354 | if (pio > 240) /* PIO2 is 240nS per cycle */ |
| 1355 | return 1; |
| 1356 | return 0; |
| 1357 | } |
| 1358 | } |
| 1359 | return 0; |
| 1360 | } |
| 1361 | |
| 1362 | /** |
| 1363 | * ata_dev_read_id - Read ID data from the specified device |
| 1364 | * @dev: target device |
| 1365 | * @p_class: pointer to class of the target device (may be changed) |
| 1366 | * @flags: ATA_READID_* flags |
| 1367 | * @id: buffer to read IDENTIFY data into |
| 1368 | * |
| 1369 | * Read ID data from the specified device. ATA_CMD_ID_ATA is |
| 1370 | * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI |
| 1371 | * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS |
| 1372 | * for pre-ATA4 drives. |
| 1373 | * |
| 1374 | * LOCKING: |
| 1375 | * Kernel thread context (may sleep) |
| 1376 | * |
| 1377 | * RETURNS: |
| 1378 | * 0 on success, -errno otherwise. |
| 1379 | */ |
| 1380 | int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
| 1381 | unsigned int flags, u16 *id) |
| 1382 | { |
| 1383 | struct ata_port *ap = dev->ap; |
| 1384 | unsigned int class = *p_class; |
| 1385 | struct ata_taskfile tf; |
| 1386 | unsigned int err_mask = 0; |
| 1387 | const char *reason; |
| 1388 | int rc; |
| 1389 | |
| 1390 | if (ata_msg_ctl(ap)) |
| 1391 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", |
| 1392 | __FUNCTION__, ap->id, dev->devno); |
| 1393 | |
| 1394 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ |
| 1395 | |
| 1396 | retry: |
| 1397 | ata_tf_init(dev, &tf); |
| 1398 | |
| 1399 | switch (class) { |
| 1400 | case ATA_DEV_ATA: |
| 1401 | tf.command = ATA_CMD_ID_ATA; |
| 1402 | break; |
| 1403 | case ATA_DEV_ATAPI: |
| 1404 | tf.command = ATA_CMD_ID_ATAPI; |
| 1405 | break; |
| 1406 | default: |
| 1407 | rc = -ENODEV; |
| 1408 | reason = "unsupported class"; |
| 1409 | goto err_out; |
| 1410 | } |
| 1411 | |
| 1412 | tf.protocol = ATA_PROT_PIO; |
| 1413 | tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */ |
| 1414 | |
| 1415 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, |
| 1416 | id, sizeof(id[0]) * ATA_ID_WORDS); |
| 1417 | if (err_mask) { |
| 1418 | if (err_mask & AC_ERR_NODEV_HINT) { |
| 1419 | DPRINTK("ata%u.%d: NODEV after polling detection\n", |
| 1420 | ap->id, dev->devno); |
| 1421 | return -ENOENT; |
| 1422 | } |
| 1423 | |
| 1424 | rc = -EIO; |
| 1425 | reason = "I/O error"; |
| 1426 | goto err_out; |
| 1427 | } |
| 1428 | |
| 1429 | swap_buf_le16(id, ATA_ID_WORDS); |
| 1430 | |
| 1431 | /* sanity check */ |
| 1432 | rc = -EINVAL; |
| 1433 | reason = "device reports illegal type"; |
| 1434 | |
| 1435 | if (class == ATA_DEV_ATA) { |
| 1436 | if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) |
| 1437 | goto err_out; |
| 1438 | } else { |
| 1439 | if (ata_id_is_ata(id)) |
| 1440 | goto err_out; |
| 1441 | } |
| 1442 | |
| 1443 | if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { |
| 1444 | /* |
| 1445 | * The exact sequence expected by certain pre-ATA4 drives is: |
| 1446 | * SRST RESET |
| 1447 | * IDENTIFY |
| 1448 | * INITIALIZE DEVICE PARAMETERS |
| 1449 | * anything else.. |
| 1450 | * Some drives were very specific about that exact sequence. |
| 1451 | */ |
| 1452 | if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { |
| 1453 | err_mask = ata_dev_init_params(dev, id[3], id[6]); |
| 1454 | if (err_mask) { |
| 1455 | rc = -EIO; |
| 1456 | reason = "INIT_DEV_PARAMS failed"; |
| 1457 | goto err_out; |
| 1458 | } |
| 1459 | |
| 1460 | /* current CHS translation info (id[53-58]) might be |
| 1461 | * changed. reread the identify device info. |
| 1462 | */ |
| 1463 | flags &= ~ATA_READID_POSTRESET; |
| 1464 | goto retry; |
| 1465 | } |
| 1466 | } |
| 1467 | |
| 1468 | *p_class = class; |
| 1469 | |
| 1470 | return 0; |
| 1471 | |
| 1472 | err_out: |
| 1473 | if (ata_msg_warn(ap)) |
| 1474 | ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " |
| 1475 | "(%s, err_mask=0x%x)\n", reason, err_mask); |
| 1476 | return rc; |
| 1477 | } |
| 1478 | |
| 1479 | static inline u8 ata_dev_knobble(struct ata_device *dev) |
| 1480 | { |
| 1481 | return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); |
| 1482 | } |
| 1483 | |
| 1484 | static void ata_dev_config_ncq(struct ata_device *dev, |
| 1485 | char *desc, size_t desc_sz) |
| 1486 | { |
| 1487 | struct ata_port *ap = dev->ap; |
| 1488 | int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); |
| 1489 | |
| 1490 | if (!ata_id_has_ncq(dev->id)) { |
| 1491 | desc[0] = '\0'; |
| 1492 | return; |
| 1493 | } |
| 1494 | if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) { |
| 1495 | snprintf(desc, desc_sz, "NCQ (not used)"); |
| 1496 | return; |
| 1497 | } |
| 1498 | if (ap->flags & ATA_FLAG_NCQ) { |
| 1499 | hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); |
| 1500 | dev->flags |= ATA_DFLAG_NCQ; |
| 1501 | } |
| 1502 | |
| 1503 | if (hdepth >= ddepth) |
| 1504 | snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); |
| 1505 | else |
| 1506 | snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); |
| 1507 | } |
| 1508 | |
| 1509 | static void ata_set_port_max_cmd_len(struct ata_port *ap) |
| 1510 | { |
| 1511 | int i; |
| 1512 | |
| 1513 | if (ap->scsi_host) { |
| 1514 | unsigned int len = 0; |
| 1515 | |
| 1516 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
| 1517 | len = max(len, ap->device[i].cdb_len); |
| 1518 | |
| 1519 | ap->scsi_host->max_cmd_len = len; |
| 1520 | } |
| 1521 | } |
| 1522 | |
| 1523 | /** |
| 1524 | * ata_dev_configure - Configure the specified ATA/ATAPI device |
| 1525 | * @dev: Target device to configure |
| 1526 | * |
| 1527 | * Configure @dev according to @dev->id. Generic and low-level |
| 1528 | * driver specific fixups are also applied. |
| 1529 | * |
| 1530 | * LOCKING: |
| 1531 | * Kernel thread context (may sleep) |
| 1532 | * |
| 1533 | * RETURNS: |
| 1534 | * 0 on success, -errno otherwise |
| 1535 | */ |
| 1536 | int ata_dev_configure(struct ata_device *dev) |
| 1537 | { |
| 1538 | struct ata_port *ap = dev->ap; |
| 1539 | int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO; |
| 1540 | const u16 *id = dev->id; |
| 1541 | unsigned int xfer_mask; |
| 1542 | char revbuf[7]; /* XYZ-99\0 */ |
| 1543 | char fwrevbuf[ATA_ID_FW_REV_LEN+1]; |
| 1544 | char modelbuf[ATA_ID_PROD_LEN+1]; |
| 1545 | int rc; |
| 1546 | |
| 1547 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { |
| 1548 | ata_dev_printk(dev, KERN_INFO, |
| 1549 | "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n", |
| 1550 | __FUNCTION__, ap->id, dev->devno); |
| 1551 | return 0; |
| 1552 | } |
| 1553 | |
| 1554 | if (ata_msg_probe(ap)) |
| 1555 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n", |
| 1556 | __FUNCTION__, ap->id, dev->devno); |
| 1557 | |
| 1558 | /* print device capabilities */ |
| 1559 | if (ata_msg_probe(ap)) |
| 1560 | ata_dev_printk(dev, KERN_DEBUG, |
| 1561 | "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " |
| 1562 | "85:%04x 86:%04x 87:%04x 88:%04x\n", |
| 1563 | __FUNCTION__, |
| 1564 | id[49], id[82], id[83], id[84], |
| 1565 | id[85], id[86], id[87], id[88]); |
| 1566 | |
| 1567 | /* initialize to-be-configured parameters */ |
| 1568 | dev->flags &= ~ATA_DFLAG_CFG_MASK; |
| 1569 | dev->max_sectors = 0; |
| 1570 | dev->cdb_len = 0; |
| 1571 | dev->n_sectors = 0; |
| 1572 | dev->cylinders = 0; |
| 1573 | dev->heads = 0; |
| 1574 | dev->sectors = 0; |
| 1575 | |
| 1576 | /* |
| 1577 | * common ATA, ATAPI feature tests |
| 1578 | */ |
| 1579 | |
| 1580 | /* find max transfer mode; for printk only */ |
| 1581 | xfer_mask = ata_id_xfermask(id); |
| 1582 | |
| 1583 | if (ata_msg_probe(ap)) |
| 1584 | ata_dump_id(id); |
| 1585 | |
| 1586 | /* ATA-specific feature tests */ |
| 1587 | if (dev->class == ATA_DEV_ATA) { |
| 1588 | if (ata_id_is_cfa(id)) { |
| 1589 | if (id[162] & 1) /* CPRM may make this media unusable */ |
| 1590 | ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n", |
| 1591 | ap->id, dev->devno); |
| 1592 | snprintf(revbuf, 7, "CFA"); |
| 1593 | } |
| 1594 | else |
| 1595 | snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); |
| 1596 | |
| 1597 | dev->n_sectors = ata_id_n_sectors(id); |
| 1598 | |
| 1599 | /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ |
| 1600 | ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV_OFS, |
| 1601 | sizeof(fwrevbuf)); |
| 1602 | |
| 1603 | ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD_OFS, |
| 1604 | sizeof(modelbuf)); |
| 1605 | |
| 1606 | if (dev->id[59] & 0x100) |
| 1607 | dev->multi_count = dev->id[59] & 0xff; |
| 1608 | |
| 1609 | if (ata_id_has_lba(id)) { |
| 1610 | const char *lba_desc; |
| 1611 | char ncq_desc[20]; |
| 1612 | |
| 1613 | lba_desc = "LBA"; |
| 1614 | dev->flags |= ATA_DFLAG_LBA; |
| 1615 | if (ata_id_has_lba48(id)) { |
| 1616 | dev->flags |= ATA_DFLAG_LBA48; |
| 1617 | lba_desc = "LBA48"; |
| 1618 | |
| 1619 | if (dev->n_sectors >= (1UL << 28) && |
| 1620 | ata_id_has_flush_ext(id)) |
| 1621 | dev->flags |= ATA_DFLAG_FLUSH_EXT; |
| 1622 | } |
| 1623 | |
| 1624 | /* config NCQ */ |
| 1625 | ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); |
| 1626 | |
| 1627 | /* print device info to dmesg */ |
| 1628 | if (ata_msg_drv(ap) && print_info) { |
| 1629 | ata_dev_printk(dev, KERN_INFO, |
| 1630 | "%s: %s, %s, max %s\n", |
| 1631 | revbuf, modelbuf, fwrevbuf, |
| 1632 | ata_mode_string(xfer_mask)); |
| 1633 | ata_dev_printk(dev, KERN_INFO, |
| 1634 | "%Lu sectors, multi %u: %s %s\n", |
| 1635 | (unsigned long long)dev->n_sectors, |
| 1636 | dev->multi_count, lba_desc, ncq_desc); |
| 1637 | } |
| 1638 | } else { |
| 1639 | /* CHS */ |
| 1640 | |
| 1641 | /* Default translation */ |
| 1642 | dev->cylinders = id[1]; |
| 1643 | dev->heads = id[3]; |
| 1644 | dev->sectors = id[6]; |
| 1645 | |
| 1646 | if (ata_id_current_chs_valid(id)) { |
| 1647 | /* Current CHS translation is valid. */ |
| 1648 | dev->cylinders = id[54]; |
| 1649 | dev->heads = id[55]; |
| 1650 | dev->sectors = id[56]; |
| 1651 | } |
| 1652 | |
| 1653 | /* print device info to dmesg */ |
| 1654 | if (ata_msg_drv(ap) && print_info) { |
| 1655 | ata_dev_printk(dev, KERN_INFO, |
| 1656 | "%s: %s, %s, max %s\n", |
| 1657 | revbuf, modelbuf, fwrevbuf, |
| 1658 | ata_mode_string(xfer_mask)); |
| 1659 | ata_dev_printk(dev, KERN_INFO, |
| 1660 | "%Lu sectors, multi %u, CHS %u/%u/%u\n", |
| 1661 | (unsigned long long)dev->n_sectors, |
| 1662 | dev->multi_count, dev->cylinders, |
| 1663 | dev->heads, dev->sectors); |
| 1664 | } |
| 1665 | } |
| 1666 | |
| 1667 | dev->cdb_len = 16; |
| 1668 | } |
| 1669 | |
| 1670 | /* ATAPI-specific feature tests */ |
| 1671 | else if (dev->class == ATA_DEV_ATAPI) { |
| 1672 | char *cdb_intr_string = ""; |
| 1673 | |
| 1674 | rc = atapi_cdb_len(id); |
| 1675 | if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { |
| 1676 | if (ata_msg_warn(ap)) |
| 1677 | ata_dev_printk(dev, KERN_WARNING, |
| 1678 | "unsupported CDB len\n"); |
| 1679 | rc = -EINVAL; |
| 1680 | goto err_out_nosup; |
| 1681 | } |
| 1682 | dev->cdb_len = (unsigned int) rc; |
| 1683 | |
| 1684 | if (ata_id_cdb_intr(dev->id)) { |
| 1685 | dev->flags |= ATA_DFLAG_CDB_INTR; |
| 1686 | cdb_intr_string = ", CDB intr"; |
| 1687 | } |
| 1688 | |
| 1689 | /* print device info to dmesg */ |
| 1690 | if (ata_msg_drv(ap) && print_info) |
| 1691 | ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n", |
| 1692 | ata_mode_string(xfer_mask), |
| 1693 | cdb_intr_string); |
| 1694 | } |
| 1695 | |
| 1696 | /* determine max_sectors */ |
| 1697 | dev->max_sectors = ATA_MAX_SECTORS; |
| 1698 | if (dev->flags & ATA_DFLAG_LBA48) |
| 1699 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; |
| 1700 | |
| 1701 | if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { |
| 1702 | /* Let the user know. We don't want to disallow opens for |
| 1703 | rescue purposes, or in case the vendor is just a blithering |
| 1704 | idiot */ |
| 1705 | if (print_info) { |
| 1706 | ata_dev_printk(dev, KERN_WARNING, |
| 1707 | "Drive reports diagnostics failure. This may indicate a drive\n"); |
| 1708 | ata_dev_printk(dev, KERN_WARNING, |
| 1709 | "fault or invalid emulation. Contact drive vendor for information.\n"); |
| 1710 | } |
| 1711 | } |
| 1712 | |
| 1713 | ata_set_port_max_cmd_len(ap); |
| 1714 | |
| 1715 | /* limit bridge transfers to udma5, 200 sectors */ |
| 1716 | if (ata_dev_knobble(dev)) { |
| 1717 | if (ata_msg_drv(ap) && print_info) |
| 1718 | ata_dev_printk(dev, KERN_INFO, |
| 1719 | "applying bridge limits\n"); |
| 1720 | dev->udma_mask &= ATA_UDMA5; |
| 1721 | dev->max_sectors = ATA_MAX_SECTORS; |
| 1722 | } |
| 1723 | |
| 1724 | if (ap->ops->dev_config) |
| 1725 | ap->ops->dev_config(ap, dev); |
| 1726 | |
| 1727 | if (ata_msg_probe(ap)) |
| 1728 | ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", |
| 1729 | __FUNCTION__, ata_chk_status(ap)); |
| 1730 | return 0; |
| 1731 | |
| 1732 | err_out_nosup: |
| 1733 | if (ata_msg_probe(ap)) |
| 1734 | ata_dev_printk(dev, KERN_DEBUG, |
| 1735 | "%s: EXIT, err\n", __FUNCTION__); |
| 1736 | return rc; |
| 1737 | } |
| 1738 | |
| 1739 | /** |
| 1740 | * ata_bus_probe - Reset and probe ATA bus |
| 1741 | * @ap: Bus to probe |
| 1742 | * |
| 1743 | * Master ATA bus probing function. Initiates a hardware-dependent |
| 1744 | * bus reset, then attempts to identify any devices found on |
| 1745 | * the bus. |
| 1746 | * |
| 1747 | * LOCKING: |
| 1748 | * PCI/etc. bus probe sem. |
| 1749 | * |
| 1750 | * RETURNS: |
| 1751 | * Zero on success, negative errno otherwise. |
| 1752 | */ |
| 1753 | |
| 1754 | int ata_bus_probe(struct ata_port *ap) |
| 1755 | { |
| 1756 | unsigned int classes[ATA_MAX_DEVICES]; |
| 1757 | int tries[ATA_MAX_DEVICES]; |
| 1758 | int i, rc, down_xfermask; |
| 1759 | struct ata_device *dev; |
| 1760 | |
| 1761 | ata_port_probe(ap); |
| 1762 | |
| 1763 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
| 1764 | tries[i] = ATA_PROBE_MAX_TRIES; |
| 1765 | |
| 1766 | retry: |
| 1767 | down_xfermask = 0; |
| 1768 | |
| 1769 | /* reset and determine device classes */ |
| 1770 | ap->ops->phy_reset(ap); |
| 1771 | |
| 1772 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
| 1773 | dev = &ap->device[i]; |
| 1774 | |
| 1775 | if (!(ap->flags & ATA_FLAG_DISABLED) && |
| 1776 | dev->class != ATA_DEV_UNKNOWN) |
| 1777 | classes[dev->devno] = dev->class; |
| 1778 | else |
| 1779 | classes[dev->devno] = ATA_DEV_NONE; |
| 1780 | |
| 1781 | dev->class = ATA_DEV_UNKNOWN; |
| 1782 | } |
| 1783 | |
| 1784 | ata_port_probe(ap); |
| 1785 | |
| 1786 | /* after the reset the device state is PIO 0 and the controller |
| 1787 | state is undefined. Record the mode */ |
| 1788 | |
| 1789 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
| 1790 | ap->device[i].pio_mode = XFER_PIO_0; |
| 1791 | |
| 1792 | /* read IDENTIFY page and configure devices */ |
| 1793 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
| 1794 | dev = &ap->device[i]; |
| 1795 | |
| 1796 | if (tries[i]) |
| 1797 | dev->class = classes[i]; |
| 1798 | |
| 1799 | if (!ata_dev_enabled(dev)) |
| 1800 | continue; |
| 1801 | |
| 1802 | rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, |
| 1803 | dev->id); |
| 1804 | if (rc) |
| 1805 | goto fail; |
| 1806 | |
| 1807 | ap->eh_context.i.flags |= ATA_EHI_PRINTINFO; |
| 1808 | rc = ata_dev_configure(dev); |
| 1809 | ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO; |
| 1810 | if (rc) |
| 1811 | goto fail; |
| 1812 | } |
| 1813 | |
| 1814 | /* configure transfer mode */ |
| 1815 | rc = ata_set_mode(ap, &dev); |
| 1816 | if (rc) { |
| 1817 | down_xfermask = 1; |
| 1818 | goto fail; |
| 1819 | } |
| 1820 | |
| 1821 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
| 1822 | if (ata_dev_enabled(&ap->device[i])) |
| 1823 | return 0; |
| 1824 | |
| 1825 | /* no device present, disable port */ |
| 1826 | ata_port_disable(ap); |
| 1827 | ap->ops->port_disable(ap); |
| 1828 | return -ENODEV; |
| 1829 | |
| 1830 | fail: |
| 1831 | switch (rc) { |
| 1832 | case -EINVAL: |
| 1833 | case -ENODEV: |
| 1834 | tries[dev->devno] = 0; |
| 1835 | break; |
| 1836 | case -EIO: |
| 1837 | sata_down_spd_limit(ap); |
| 1838 | /* fall through */ |
| 1839 | default: |
| 1840 | tries[dev->devno]--; |
| 1841 | if (down_xfermask && |
| 1842 | ata_down_xfermask_limit(dev, tries[dev->devno] == 1)) |
| 1843 | tries[dev->devno] = 0; |
| 1844 | } |
| 1845 | |
| 1846 | if (!tries[dev->devno]) { |
| 1847 | ata_down_xfermask_limit(dev, 1); |
| 1848 | ata_dev_disable(dev); |
| 1849 | } |
| 1850 | |
| 1851 | goto retry; |
| 1852 | } |
| 1853 | |
| 1854 | /** |
| 1855 | * ata_port_probe - Mark port as enabled |
| 1856 | * @ap: Port for which we indicate enablement |
| 1857 | * |
| 1858 | * Modify @ap data structure such that the system |
| 1859 | * thinks that the entire port is enabled. |
| 1860 | * |
| 1861 | * LOCKING: host lock, or some other form of |
| 1862 | * serialization. |
| 1863 | */ |
| 1864 | |
| 1865 | void ata_port_probe(struct ata_port *ap) |
| 1866 | { |
| 1867 | ap->flags &= ~ATA_FLAG_DISABLED; |
| 1868 | } |
| 1869 | |
| 1870 | /** |
| 1871 | * sata_print_link_status - Print SATA link status |
| 1872 | * @ap: SATA port to printk link status about |
| 1873 | * |
| 1874 | * This function prints link speed and status of a SATA link. |
| 1875 | * |
| 1876 | * LOCKING: |
| 1877 | * None. |
| 1878 | */ |
| 1879 | static void sata_print_link_status(struct ata_port *ap) |
| 1880 | { |
| 1881 | u32 sstatus, scontrol, tmp; |
| 1882 | |
| 1883 | if (sata_scr_read(ap, SCR_STATUS, &sstatus)) |
| 1884 | return; |
| 1885 | sata_scr_read(ap, SCR_CONTROL, &scontrol); |
| 1886 | |
| 1887 | if (ata_port_online(ap)) { |
| 1888 | tmp = (sstatus >> 4) & 0xf; |
| 1889 | ata_port_printk(ap, KERN_INFO, |
| 1890 | "SATA link up %s (SStatus %X SControl %X)\n", |
| 1891 | sata_spd_string(tmp), sstatus, scontrol); |
| 1892 | } else { |
| 1893 | ata_port_printk(ap, KERN_INFO, |
| 1894 | "SATA link down (SStatus %X SControl %X)\n", |
| 1895 | sstatus, scontrol); |
| 1896 | } |
| 1897 | } |
| 1898 | |
| 1899 | /** |
| 1900 | * __sata_phy_reset - Wake/reset a low-level SATA PHY |
| 1901 | * @ap: SATA port associated with target SATA PHY. |
| 1902 | * |
| 1903 | * This function issues commands to standard SATA Sxxx |
| 1904 | * PHY registers, to wake up the phy (and device), and |
| 1905 | * clear any reset condition. |
| 1906 | * |
| 1907 | * LOCKING: |
| 1908 | * PCI/etc. bus probe sem. |
| 1909 | * |
| 1910 | */ |
| 1911 | void __sata_phy_reset(struct ata_port *ap) |
| 1912 | { |
| 1913 | u32 sstatus; |
| 1914 | unsigned long timeout = jiffies + (HZ * 5); |
| 1915 | |
| 1916 | if (ap->flags & ATA_FLAG_SATA_RESET) { |
| 1917 | /* issue phy wake/reset */ |
| 1918 | sata_scr_write_flush(ap, SCR_CONTROL, 0x301); |
| 1919 | /* Couldn't find anything in SATA I/II specs, but |
| 1920 | * AHCI-1.1 10.4.2 says at least 1 ms. */ |
| 1921 | mdelay(1); |
| 1922 | } |
| 1923 | /* phy wake/clear reset */ |
| 1924 | sata_scr_write_flush(ap, SCR_CONTROL, 0x300); |
| 1925 | |
| 1926 | /* wait for phy to become ready, if necessary */ |
| 1927 | do { |
| 1928 | msleep(200); |
| 1929 | sata_scr_read(ap, SCR_STATUS, &sstatus); |
| 1930 | if ((sstatus & 0xf) != 1) |
| 1931 | break; |
| 1932 | } while (time_before(jiffies, timeout)); |
| 1933 | |
| 1934 | /* print link status */ |
| 1935 | sata_print_link_status(ap); |
| 1936 | |
| 1937 | /* TODO: phy layer with polling, timeouts, etc. */ |
| 1938 | if (!ata_port_offline(ap)) |
| 1939 | ata_port_probe(ap); |
| 1940 | else |
| 1941 | ata_port_disable(ap); |
| 1942 | |
| 1943 | if (ap->flags & ATA_FLAG_DISABLED) |
| 1944 | return; |
| 1945 | |
| 1946 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { |
| 1947 | ata_port_disable(ap); |
| 1948 | return; |
| 1949 | } |
| 1950 | |
| 1951 | ap->cbl = ATA_CBL_SATA; |
| 1952 | } |
| 1953 | |
| 1954 | /** |
| 1955 | * sata_phy_reset - Reset SATA bus. |
| 1956 | * @ap: SATA port associated with target SATA PHY. |
| 1957 | * |
| 1958 | * This function resets the SATA bus, and then probes |
| 1959 | * the bus for devices. |
| 1960 | * |
| 1961 | * LOCKING: |
| 1962 | * PCI/etc. bus probe sem. |
| 1963 | * |
| 1964 | */ |
| 1965 | void sata_phy_reset(struct ata_port *ap) |
| 1966 | { |
| 1967 | __sata_phy_reset(ap); |
| 1968 | if (ap->flags & ATA_FLAG_DISABLED) |
| 1969 | return; |
| 1970 | ata_bus_reset(ap); |
| 1971 | } |
| 1972 | |
| 1973 | /** |
| 1974 | * ata_dev_pair - return other device on cable |
| 1975 | * @adev: device |
| 1976 | * |
| 1977 | * Obtain the other device on the same cable, or if none is |
| 1978 | * present NULL is returned |
| 1979 | */ |
| 1980 | |
| 1981 | struct ata_device *ata_dev_pair(struct ata_device *adev) |
| 1982 | { |
| 1983 | struct ata_port *ap = adev->ap; |
| 1984 | struct ata_device *pair = &ap->device[1 - adev->devno]; |
| 1985 | if (!ata_dev_enabled(pair)) |
| 1986 | return NULL; |
| 1987 | return pair; |
| 1988 | } |
| 1989 | |
| 1990 | /** |
| 1991 | * ata_port_disable - Disable port. |
| 1992 | * @ap: Port to be disabled. |
| 1993 | * |
| 1994 | * Modify @ap data structure such that the system |
| 1995 | * thinks that the entire port is disabled, and should |
| 1996 | * never attempt to probe or communicate with devices |
| 1997 | * on this port. |
| 1998 | * |
| 1999 | * LOCKING: host lock, or some other form of |
| 2000 | * serialization. |
| 2001 | */ |
| 2002 | |
| 2003 | void ata_port_disable(struct ata_port *ap) |
| 2004 | { |
| 2005 | ap->device[0].class = ATA_DEV_NONE; |
| 2006 | ap->device[1].class = ATA_DEV_NONE; |
| 2007 | ap->flags |= ATA_FLAG_DISABLED; |
| 2008 | } |
| 2009 | |
| 2010 | /** |
| 2011 | * sata_down_spd_limit - adjust SATA spd limit downward |
| 2012 | * @ap: Port to adjust SATA spd limit for |
| 2013 | * |
| 2014 | * Adjust SATA spd limit of @ap downward. Note that this |
| 2015 | * function only adjusts the limit. The change must be applied |
| 2016 | * using sata_set_spd(). |
| 2017 | * |
| 2018 | * LOCKING: |
| 2019 | * Inherited from caller. |
| 2020 | * |
| 2021 | * RETURNS: |
| 2022 | * 0 on success, negative errno on failure |
| 2023 | */ |
| 2024 | int sata_down_spd_limit(struct ata_port *ap) |
| 2025 | { |
| 2026 | u32 sstatus, spd, mask; |
| 2027 | int rc, highbit; |
| 2028 | |
| 2029 | rc = sata_scr_read(ap, SCR_STATUS, &sstatus); |
| 2030 | if (rc) |
| 2031 | return rc; |
| 2032 | |
| 2033 | mask = ap->sata_spd_limit; |
| 2034 | if (mask <= 1) |
| 2035 | return -EINVAL; |
| 2036 | highbit = fls(mask) - 1; |
| 2037 | mask &= ~(1 << highbit); |
| 2038 | |
| 2039 | spd = (sstatus >> 4) & 0xf; |
| 2040 | if (spd <= 1) |
| 2041 | return -EINVAL; |
| 2042 | spd--; |
| 2043 | mask &= (1 << spd) - 1; |
| 2044 | if (!mask) |
| 2045 | return -EINVAL; |
| 2046 | |
| 2047 | ap->sata_spd_limit = mask; |
| 2048 | |
| 2049 | ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n", |
| 2050 | sata_spd_string(fls(mask))); |
| 2051 | |
| 2052 | return 0; |
| 2053 | } |
| 2054 | |
| 2055 | static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol) |
| 2056 | { |
| 2057 | u32 spd, limit; |
| 2058 | |
| 2059 | if (ap->sata_spd_limit == UINT_MAX) |
| 2060 | limit = 0; |
| 2061 | else |
| 2062 | limit = fls(ap->sata_spd_limit); |
| 2063 | |
| 2064 | spd = (*scontrol >> 4) & 0xf; |
| 2065 | *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); |
| 2066 | |
| 2067 | return spd != limit; |
| 2068 | } |
| 2069 | |
| 2070 | /** |
| 2071 | * sata_set_spd_needed - is SATA spd configuration needed |
| 2072 | * @ap: Port in question |
| 2073 | * |
| 2074 | * Test whether the spd limit in SControl matches |
| 2075 | * @ap->sata_spd_limit. This function is used to determine |
| 2076 | * whether hardreset is necessary to apply SATA spd |
| 2077 | * configuration. |
| 2078 | * |
| 2079 | * LOCKING: |
| 2080 | * Inherited from caller. |
| 2081 | * |
| 2082 | * RETURNS: |
| 2083 | * 1 if SATA spd configuration is needed, 0 otherwise. |
| 2084 | */ |
| 2085 | int sata_set_spd_needed(struct ata_port *ap) |
| 2086 | { |
| 2087 | u32 scontrol; |
| 2088 | |
| 2089 | if (sata_scr_read(ap, SCR_CONTROL, &scontrol)) |
| 2090 | return 0; |
| 2091 | |
| 2092 | return __sata_set_spd_needed(ap, &scontrol); |
| 2093 | } |
| 2094 | |
| 2095 | /** |
| 2096 | * sata_set_spd - set SATA spd according to spd limit |
| 2097 | * @ap: Port to set SATA spd for |
| 2098 | * |
| 2099 | * Set SATA spd of @ap according to sata_spd_limit. |
| 2100 | * |
| 2101 | * LOCKING: |
| 2102 | * Inherited from caller. |
| 2103 | * |
| 2104 | * RETURNS: |
| 2105 | * 0 if spd doesn't need to be changed, 1 if spd has been |
| 2106 | * changed. Negative errno if SCR registers are inaccessible. |
| 2107 | */ |
| 2108 | int sata_set_spd(struct ata_port *ap) |
| 2109 | { |
| 2110 | u32 scontrol; |
| 2111 | int rc; |
| 2112 | |
| 2113 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) |
| 2114 | return rc; |
| 2115 | |
| 2116 | if (!__sata_set_spd_needed(ap, &scontrol)) |
| 2117 | return 0; |
| 2118 | |
| 2119 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) |
| 2120 | return rc; |
| 2121 | |
| 2122 | return 1; |
| 2123 | } |
| 2124 | |
| 2125 | /* |
| 2126 | * This mode timing computation functionality is ported over from |
| 2127 | * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik |
| 2128 | */ |
| 2129 | /* |
| 2130 | * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). |
| 2131 | * These were taken from ATA/ATAPI-6 standard, rev 0a, except |
| 2132 | * for UDMA6, which is currently supported only by Maxtor drives. |
| 2133 | * |
| 2134 | * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. |
| 2135 | */ |
| 2136 | |
static const struct ata_timing ata_timing[] = {

	/* Row layout assumed to match struct ata_timing declaration order:
	 * { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma }
	 * values in nanoseconds, 0 = not applicable for that mode
	 * — TODO confirm against <linux/libata.h> before touching rows. */

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	/* end-of-table sentinel checked by ata_timing_find_mode() */
	{ 0xFF }
};
| 2173 | |
/* ENOUGH(v, unit): ceiling division — smallest number of 'unit'-sized
 * clock periods covering 'v'.  EZ() additionally maps 0 to 0 so unused
 * timing fields stay zero.  NOTE: EZ evaluates 'v' more than once;
 * callers must pass side-effect-free expressions. */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
| 2176 | |
/* Convert timings in @t (nanoseconds) into clock counts in @q, rounding
 * up.  @T is the clock period for PIO/MWDMA fields and @UT for the UDMA
 * field — apparently in picoseconds, given the *1000 scaling of the ns
 * values; confirm against callers of ata_timing_compute(). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
| 2188 | |
/* Merge two timing sets into @m: for each field selected by the
 * ATA_TIMING_* bits in @what, take the larger (slower, therefore safer)
 * of @a's and @b's value.  Because each field is handled independently,
 * @m may alias @a or @b (callers rely on this). */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
| 2201 | |
| 2202 | static const struct ata_timing* ata_timing_find_mode(unsigned short speed) |
| 2203 | { |
| 2204 | const struct ata_timing *t; |
| 2205 | |
| 2206 | for (t = ata_timing; t->mode != speed; t++) |
| 2207 | if (t->mode == 0xFF) |
| 2208 | return NULL; |
| 2209 | return t; |
| 2210 | } |
| 2211 | |
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the baseline timings for the requested transfer mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing (via the IDENTIFY data words).
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* slow PIO modes report one cycle word, IORDY modes another */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* stretch to whichever cycle value is larger */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's PIO mode; merge so DMA
		 * never ends up faster than PIO */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
| 2276 | |
| 2277 | /** |
| 2278 | * ata_down_xfermask_limit - adjust dev xfer masks downward |
| 2279 | * @dev: Device to adjust xfer masks |
| 2280 | * @force_pio0: Force PIO0 |
| 2281 | * |
| 2282 | * Adjust xfer masks of @dev downward. Note that this function |
| 2283 | * does not apply the change. Invoking ata_set_mode() afterwards |
| 2284 | * will apply the limit. |
| 2285 | * |
| 2286 | * LOCKING: |
| 2287 | * Inherited from caller. |
| 2288 | * |
| 2289 | * RETURNS: |
| 2290 | * 0 on success, negative errno on failure |
| 2291 | */ |
| 2292 | int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0) |
| 2293 | { |
| 2294 | unsigned long xfer_mask; |
| 2295 | int highbit; |
| 2296 | |
| 2297 | xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, |
| 2298 | dev->udma_mask); |
| 2299 | |
| 2300 | if (!xfer_mask) |
| 2301 | goto fail; |
| 2302 | /* don't gear down to MWDMA from UDMA, go directly to PIO */ |
| 2303 | if (xfer_mask & ATA_MASK_UDMA) |
| 2304 | xfer_mask &= ~ATA_MASK_MWDMA; |
| 2305 | |
| 2306 | highbit = fls(xfer_mask) - 1; |
| 2307 | xfer_mask &= ~(1 << highbit); |
| 2308 | if (force_pio0) |
| 2309 | xfer_mask &= 1 << ATA_SHIFT_PIO; |
| 2310 | if (!xfer_mask) |
| 2311 | goto fail; |
| 2312 | |
| 2313 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, |
| 2314 | &dev->udma_mask); |
| 2315 | |
| 2316 | ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n", |
| 2317 | ata_mode_string(xfer_mask)); |
| 2318 | |
| 2319 | return 0; |
| 2320 | |
| 2321 | fail: |
| 2322 | return -EINVAL; |
| 2323 | } |
| 2324 | |
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	/* keep the PIO flag in sync with the chosen transfer class */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* tell the device itself (SET FEATURES - XFER MODE) */
	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* revalidate to confirm the device accepted the new mode;
	 * POST_SETMODE tells EH this revalidation follows a mode change */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
| 2355 | |
| 2356 | /** |
| 2357 | * ata_set_mode - Program timings and issue SET FEATURES - XFER |
| 2358 | * @ap: port on which timings will be programmed |
 *	@r_failed_dev: out parameter for failed device
| 2360 | * |
| 2361 | * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If |
| 2362 | * ata_set_mode() fails, pointer to the failing device is |
| 2363 | * returned in @r_failed_dev. |
| 2364 | * |
| 2365 | * LOCKING: |
| 2366 | * PCI/etc. bus probe sem. |
| 2367 | * |
| 2368 | * RETURNS: |
| 2369 | * 0 on success, negative errno otherwise |
| 2370 | */ |
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? let the LLD do everything itself */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask for every enabled device */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		/* apply device/host transfer-mode limits to dev's masks */
		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		/* a device with no usable PIO mode at all is fatal */
		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings (only for devices with a DMA mode) */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	/* rc is only non-zero after 'dev' was set by one of the loops */
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
| 2462 | |
| 2463 | /** |
| 2464 | * ata_tf_to_host - issue ATA taskfile to host controller |
| 2465 | * @ap: port to which command is being issued |
| 2466 | * @tf: ATA taskfile register set |
| 2467 | * |
| 2468 | * Issues ATA taskfile register set to ATA host controller, |
| 2469 | * with proper synchronization with interrupt handler and |
| 2470 | * other threads. |
| 2471 | * |
| 2472 | * LOCKING: |
| 2473 | * spin_lock_irqsave(host lock) |
| 2474 | */ |
| 2475 | |
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load taskfile registers first; writing the command register
	 * (exec_command) is what actually starts the device operation */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
| 2482 | |
| 2483 | /** |
| 2484 | * ata_busy_sleep - sleep until BSY clears, or timeout |
| 2485 | * @ap: port containing status register to be polled |
| 2486 | * @tmout_pat: impatience timeout |
| 2487 | * @tmout: overall timeout |
| 2488 | * |
| 2489 | * Sleep until ATA Status register bit BSY clears, |
| 2490 | * or a timeout occurs. |
| 2491 | * |
| 2492 | * LOCKING: |
| 2493 | * Kernel thread context (may sleep). |
| 2494 | * |
| 2495 | * RETURNS: |
| 2496 | * 0 on success, -errno otherwise. |
| 2497 | */ |
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* a status of 0xff means the port/device is gone; it is treated
	 * as terminal in every check below */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* phase 1: poll quietly until the "impatience" timeout */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* phase 2: keep polling, now against the hard timeout
	 * (measured from the same start time) */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
| 2537 | |
/* ata_bus_post_reset - wait for devices to settle after a bus reset.
 * @devmask bits 0/1 flag which devices ata_devchk() found present.
 * Sleeps; must be called from thread context. */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		/* nsect==1 && lbal==1 is the post-reset register
		 * signature: device 1 is responding to reads */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
| 2579 | |
/* ata_bus_softreset - pulse SRST in the device control register, then
 * wait out the spec-mandated delays and let devices settle.  Always
 * returns 0; a floating status bus (0xFF) skips post-reset processing. */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset. causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
| 2617 | |
| 2618 | /** |
| 2619 | * ata_bus_reset - reset host port and associated ATA channel |
| 2620 | * @ap: port to reset |
| 2621 | * |
| 2622 | * This is typically the first time we actually start issuing |
| 2623 | * commands to the ATA channel. We wait for BSY to clear, then |
| 2624 | * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its |
| 2625 | * result. Determine what devices, if any, are on the channel |
| 2626 | * by looking at the device 0/1 error register. Look at the signature |
| 2627 | * stored in each device's taskfile registers, to determine if |
| 2628 | * the device is ATA or ATAPI. |
| 2629 | * |
| 2630 | * LOCKING: |
| 2631 | * PCI/etc. bus probe sem. |
| 2632 | * Obtains host lock. |
| 2633 | * |
| 2634 | * SIDE EFFECTS: |
| 2635 | * Sets ATA_FLAG_DISABLED if bus reset fails. |
| 2636 | */ |
| 2637 | |
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 is there */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset (SRST); on failure fall through to disable */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices.
	 * err 0x81 appears to be the diagnostic code for "device 1
	 * failed" — in that case device 1 is not classified; confirm
	 * against the ATA EXECUTE DEVICE DIAGNOSTIC spec.
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
| 2704 | |
| 2705 | /** |
| 2706 | * sata_phy_debounce - debounce SATA phy status |
| 2707 | * @ap: ATA port to debounce SATA phy status for |
 *	@params: timing parameters { interval, duration, timeout } in msec
| 2709 | * |
| 2710 | * Make sure SStatus of @ap reaches stable state, determined by |
| 2711 | * holding the same value where DET is not 1 for @duration polled |
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
| 2715 | * until timeout then returns 0 if DET is stable at 1. |
| 2716 | * |
| 2717 | * LOCKING: |
| 2718 | * Kernel thread context (may sleep) |
| 2719 | * |
| 2720 | * RETURNS: |
| 2721 | * 0 on success, -errno on failure. |
| 2722 | */ |
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;	/* msec -> jiffies */
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field (bits 3:0) matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 counts as stable only once the timeout
			 * has expired — some controllers get stuck at 1
			 * after hot unplug (see function comment) */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			/* stable for the required duration: done */
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
| 2763 | |
| 2764 | /** |
| 2765 | * sata_phy_resume - resume SATA phy |
| 2766 | * @ap: ATA port to resume SATA phy for |
 *	@params: timing parameters { interval, duration, timeout } in msec
| 2768 | * |
| 2769 | * Resume SATA phy of @ap and debounce it. |
| 2770 | * |
| 2771 | * LOCKING: |
| 2772 | * Kernel thread context (may sleep) |
| 2773 | * |
| 2774 | * RETURNS: |
| 2775 | * 0 on success, -errno on failure. |
| 2776 | */ |
| 2777 | int sata_phy_resume(struct ata_port *ap, const unsigned long *params) |
| 2778 | { |
| 2779 | u32 scontrol; |
| 2780 | int rc; |
| 2781 | |
| 2782 | if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) |
| 2783 | return rc; |
| 2784 | |
| 2785 | scontrol = (scontrol & 0x0f0) | 0x300; |
| 2786 | |
| 2787 | if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) |
| 2788 | return rc; |
| 2789 | |
| 2790 | /* Some PHYs react badly if SStatus is pounded immediately |
| 2791 | * after resuming. Delay 200ms before debouncing. |
| 2792 | */ |
| 2793 | msleep(200); |
| 2794 | |
| 2795 | return sata_phy_debounce(ap, params); |
| 2796 | } |
| 2797 | |
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	/* round up; only meaningful while 'end' is in the future — the
	 * time_after() check below bails out before secs is ever used
	 * in the stale case */
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
| 2826 | |
| 2827 | /** |
| 2828 | * ata_std_prereset - prepare for reset |
| 2829 | * @ap: ATA port to be reset |
| 2830 | * |
| 2831 | * @ap is about to be reset. Initialize it. |
| 2832 | * |
| 2833 | * LOCKING: |
| 2834 | * Kernel thread context (may sleep) |
| 2835 | * |
| 2836 | * RETURNS: |
| 2837 | * 0 on success, -errno otherwise. |
| 2838 | */ |
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some controllers need a hardreset to
	 * bring the link back up */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* hotplugged devices may still be spinning up */
	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
| 2877 | |
| 2878 | /** |
| 2879 | * ata_std_softreset - reset host port via ATA SRST |
| 2880 | * @ap: port to reset |
| 2881 | * @classes: resulting classes of attached devices |
| 2882 | * |
| 2883 | * Reset host port using ATA SRST. |
| 2884 | * |
| 2885 | * LOCKING: |
| 2886 | * Kernel thread context (may sleep) |
| 2887 | * |
| 2888 | * RETURNS: |
| 2889 | * 0 on success, -errno otherwise. |
| 2890 | */ |
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link -- nothing to reset or classify.  NOTE(review):
	 * classes[1] is left untouched on this path; presumably the
	 * caller pre-initializes the array -- confirm at the call sites.
	 */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* a diagnostic code of 0x81 indicates device 1 failed
	 * diagnostics -- skip classifying the slave in that case
	 */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
| 2931 | |
| 2932 | /** |
| 2933 | * sata_port_hardreset - reset port via SATA phy reset |
| 2934 | * @ap: port to reset |
 *	@timing: timing parameters { interval, duration, timeout } in msec
| 2936 | * |
| 2937 | * SATA phy-reset host port using DET bits of SControl register. |
| 2938 | * |
| 2939 | * LOCKING: |
| 2940 | * Kernel thread context (may sleep) |
| 2941 | * |
| 2942 | * RETURNS: |
| 2943 | * 0 on success, -errno otherwise. |
| 2944 | */ |
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* keep the requested-speed bits (0x0f0), set DET=0x4
		 * (disable interface) and IPM=0x3
		 */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET=0x1 requests interface initialization (COMRESET) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	/* flush the write so the reset actually hits the wire before
	 * we start the 1 ms hold-off below
	 */
	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
| 2989 | |
| 2990 | /** |
| 2991 | * sata_std_hardreset - reset host port via SATA phy reset |
| 2992 | * @ap: port to reset |
| 2993 | * @class: resulting class of attached device |
| 2994 | * |
| 2995 | * SATA phy-reset host port using DET bits of SControl register, |
| 2996 | * wait for !BSY and classify the attached device. |
| 2997 | * |
| 2998 | * LOCKING: |
| 2999 | * Kernel thread context (may sleep) |
| 3000 | * |
| 3001 | * RETURNS: |
| 3002 | * 0 on success, -errno otherwise. |
| 3003 | */ |
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	/* link still offline after the reset -- nothing is attached */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	/* device must clear BSY before it can be classified */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	/* classify by the post-reset signature in the taskfile */
	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
| 3042 | |
| 3043 | /** |
| 3044 | * ata_std_postreset - standard postreset callback |
| 3045 | * @ap: the target ata_port |
| 3046 | * @classes: classes of attached devices |
| 3047 | * |
| 3048 | * This function is invoked after a successful reset. Note that |
| 3049 | * the device might have been reset more than once using |
| 3050 | * different reset methods before postreset is invoked. |
| 3051 | * |
| 3052 | * LOCKING: |
| 3053 | * Kernel thread context (may sleep) |
| 3054 | */ |
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	/* writing the value just read back clears the set bits
	 * (SError is write-1-to-clear)
	 */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	/* only when there is no EH; with EH, interrupt state is its job */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
| 3090 | |
| 3091 | /** |
| 3092 | * ata_dev_same_device - Determine whether new ID matches configured device |
| 3093 | * @dev: device to compare against |
| 3094 | * @new_class: class of the new device |
| 3095 | * @new_id: IDENTIFY page of the new device |
| 3096 | * |
| 3097 | * Compare @new_class and @new_id against @dev and determine |
| 3098 | * whether @dev is the device indicated by @new_class and |
| 3099 | * @new_id. |
| 3100 | * |
| 3101 | * LOCKING: |
| 3102 | * None. |
| 3103 | * |
| 3104 | * RETURNS: |
| 3105 | * 1 if @dev matches @new_class and @new_id, 0 otherwise. |
| 3106 | */ |
| 3107 | static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, |
| 3108 | const u16 *new_id) |
| 3109 | { |
| 3110 | const u16 *old_id = dev->id; |
| 3111 | unsigned char model[2][ATA_ID_PROD_LEN + 1]; |
| 3112 | unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; |
| 3113 | u64 new_n_sectors; |
| 3114 | |
| 3115 | if (dev->class != new_class) { |
| 3116 | ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", |
| 3117 | dev->class, new_class); |
| 3118 | return 0; |
| 3119 | } |
| 3120 | |
| 3121 | ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); |
| 3122 | ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); |
| 3123 | ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); |
| 3124 | ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); |
| 3125 | new_n_sectors = ata_id_n_sectors(new_id); |
| 3126 | |
| 3127 | if (strcmp(model[0], model[1])) { |
| 3128 | ata_dev_printk(dev, KERN_INFO, "model number mismatch " |
| 3129 | "'%s' != '%s'\n", model[0], model[1]); |
| 3130 | return 0; |
| 3131 | } |
| 3132 | |
| 3133 | if (strcmp(serial[0], serial[1])) { |
| 3134 | ata_dev_printk(dev, KERN_INFO, "serial number mismatch " |
| 3135 | "'%s' != '%s'\n", serial[0], serial[1]); |
| 3136 | return 0; |
| 3137 | } |
| 3138 | |
| 3139 | if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { |
| 3140 | ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " |
| 3141 | "%llu != %llu\n", |
| 3142 | (unsigned long long)dev->n_sectors, |
| 3143 | (unsigned long long)new_n_sectors); |
| 3144 | return 0; |
| 3145 | } |
| 3146 | |
| 3147 | return 1; |
| 3148 | } |
| 3149 | |
| 3150 | /** |
| 3151 | * ata_dev_revalidate - Revalidate ATA device |
| 3152 | * @dev: device to revalidate |
| 3153 | * @readid_flags: read ID flags |
| 3154 | * |
| 3155 | * Re-read IDENTIFY page and make sure @dev is still attached to |
| 3156 | * the port. |
| 3157 | * |
| 3158 | * LOCKING: |
| 3159 | * Kernel thread context (may sleep) |
| 3160 | * |
| 3161 | * RETURNS: |
| 3162 | * 0 on success, negative errno otherwise |
| 3163 | */ |
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;	/* scratch space for the new ID page */
	int rc;

	/* a disabled device cannot be revalidated */
	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* same device -- commit the freshly read ID page */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
| 3197 | |
/* One row of the device quirk table below: a device is matched by its
 * IDENTIFY model string and, optionally, its firmware revision.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model string to match */
	const char *model_rev;	/* firmware revision; NULL matches any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};
| 3203 | |
| 3204 | static const struct ata_blacklist_entry ata_device_blacklist [] = { |
| 3205 | /* Devices with DMA related problems under Linux */ |
| 3206 | { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, |
| 3207 | { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, |
| 3208 | { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, |
| 3209 | { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, |
| 3210 | { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, |
| 3211 | { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, |
| 3212 | { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, |
| 3213 | { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, |
| 3214 | { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, |
| 3215 | { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, |
| 3216 | { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, |
| 3217 | { "CRD-84", NULL, ATA_HORKAGE_NODMA }, |
| 3218 | { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, |
| 3219 | { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, |
| 3220 | { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, |
| 3221 | { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, |
| 3222 | { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, |
| 3223 | { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, |
| 3224 | { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, |
| 3225 | { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, |
| 3226 | { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, |
| 3227 | { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, |
| 3228 | { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, |
| 3229 | { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, |
| 3230 | { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, |
| 3231 | { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, |
| 3232 | { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, |
| 3233 | { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, |
| 3234 | { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, |
| 3235 | { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, |
| 3236 | |
| 3237 | /* Devices we expect to fail diagnostics */ |
| 3238 | |
| 3239 | /* Devices where NCQ should be avoided */ |
| 3240 | /* NCQ is slow */ |
| 3241 | { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, |
| 3242 | |
| 3243 | /* Devices with NCQ limits */ |
| 3244 | |
| 3245 | /* End Marker */ |
| 3246 | { } |
| 3247 | }; |
| 3248 | |
| 3249 | unsigned long ata_device_blacklisted(const struct ata_device *dev) |
| 3250 | { |
| 3251 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; |
| 3252 | unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; |
| 3253 | const struct ata_blacklist_entry *ad = ata_device_blacklist; |
| 3254 | |
| 3255 | ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); |
| 3256 | ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); |
| 3257 | |
| 3258 | while (ad->model_num) { |
| 3259 | if (!strcmp(ad->model_num, model_num)) { |
| 3260 | if (ad->model_rev == NULL) |
| 3261 | return ad->horkage; |
| 3262 | if (!strcmp(ad->model_rev, model_rev)) |
| 3263 | return ad->horkage; |
| 3264 | } |
| 3265 | ad++; |
| 3266 | } |
| 3267 | return 0; |
| 3268 | } |
| 3269 | |
| 3270 | static int ata_dma_blacklisted(const struct ata_device *dev) |
| 3271 | { |
| 3272 | /* We don't support polling DMA. |
| 3273 | * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) |
| 3274 | * if the LLDD handles only interrupts in the HSM_ST_LAST state. |
| 3275 | */ |
| 3276 | if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) && |
| 3277 | (dev->flags & ATA_DFLAG_CDB_INTR)) |
| 3278 | return 1; |
| 3279 | return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0; |
| 3280 | } |
| 3281 | |
| 3282 | /** |
| 3283 | * ata_dev_xfermask - Compute supported xfermask of the given device |
| 3284 | * @dev: Device to compute xfermask for |
| 3285 | * |
| 3286 | * Compute supported xfermask of @dev and store it in |
| 3287 | * dev->*_mask. This function is responsible for applying all |
| 3288 | * known limits including host controller limits, device |
| 3289 | * blacklist, etc... |
| 3290 | * |
| 3291 | * LOCKING: |
| 3292 | * None. |
| 3293 | */ |
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	/* 0xF8 << ATA_SHIFT_UDMA strips UDMA3 and above, the modes
	 * that require an 80-wire cable
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* intersect with what the device itself supports */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one device may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* the LLDD gets the last word on the allowed modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	/* store the final masks back into the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
| 3350 | |
| 3351 | /** |
| 3352 | * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command |
| 3353 | * @dev: Device to which command will be sent |
| 3354 | * |
| 3355 | * Issue SET FEATURES - XFER MODE command to device @dev |
| 3356 | * on port @ap. |
| 3357 | * |
| 3358 | * LOCKING: |
| 3359 | * PCI/etc. bus probe sem. |
| 3360 | * |
| 3361 | * RETURNS: |
| 3362 | * 0 on success, AC_ERR_* mask otherwise. |
| 3363 | */ |
| 3364 | |
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* sector count register carries the mode */

	/* issue synchronously; no data phase */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
| 3385 | |
| 3386 | /** |
| 3387 | * ata_dev_init_params - Issue INIT DEV PARAMS command |
| 3388 | * @dev: Device to which command will be sent |
| 3389 | * @heads: Number of heads (taskfile parameter) |
| 3390 | * @sectors: Number of sectors (taskfile parameter) |
| 3391 | * |
| 3392 | * LOCKING: |
| 3393 | * Kernel thread context (may sleep) |
| 3394 | * |
| 3395 | * RETURNS: |
| 3396 | * 0 on success, AC_ERR_* mask otherwise. |
| 3397 | */ |
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;	/* sector count register = sectors per track */
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	/* issue synchronously; no data phase */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
| 3423 | |
| 3424 | /** |
| 3425 | * ata_sg_clean - Unmap DMA memory associated with command |
| 3426 | * @qc: Command containing DMA memory to be released |
| 3427 | * |
| 3428 | * Unmap all mapped DMA memory associated with this command. |
| 3429 | * |
| 3430 | * LOCKING: |
| 3431 | * spin_lock_irqsave(host lock) |
| 3432 | */ |
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		/* undo the trim done at map time so the caller sees the
		 * original element length
		 */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			/* target page may be highmem -- use an atomic kmap */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		/* single-buffer case: copy the tail straight back */
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
| 3481 | |
| 3482 | /** |
| 3483 | * ata_fill_sg - Fill PCI IDE PRD table |
| 3484 | * @qc: Metadata associated with taskfile to be transferred |
| 3485 | * |
| 3486 | * Fill PCI IDE PRD (scatter-gather) table with segments |
| 3487 | * associated with the current disk command. |
| 3488 | * |
| 3489 | * LOCKING: |
| 3490 | * spin_lock_irqsave(host lock) |
| 3491 | * |
| 3492 | */ |
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element into PRD entries that never
		 * cross a 64K boundary
		 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			/* len & 0xffff: a full 64K segment is deliberately
			 * encoded as 0, per the PRD length convention
			 */
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
| 3533 | /** |
| 3534 | * ata_check_atapi_dma - Check whether ATAPI DMA can be supported |
| 3535 | * @qc: Metadata associated with taskfile to check |
| 3536 | * |
| 3537 | * Allow low-level driver to filter ATA PACKET commands, returning |
| 3538 | * a status indicating whether or not it is OK to use DMA for the |
| 3539 | * supplied PACKET command. |
| 3540 | * |
| 3541 | * LOCKING: |
| 3542 | * spin_lock_irqsave(host lock) |
| 3543 | * |
| 3544 | * RETURNS: 0 when ATAPI DMA can be used |
| 3545 | * nonzero otherwise |
| 3546 | */ |
| 3547 | int ata_check_atapi_dma(struct ata_queued_cmd *qc) |
| 3548 | { |
| 3549 | struct ata_port *ap = qc->ap; |
| 3550 | int rc = 0; /* Assume ATAPI DMA is OK by default */ |
| 3551 | |
| 3552 | if (ap->ops->check_atapi_dma) |
| 3553 | rc = ap->ops->check_atapi_dma(qc); |
| 3554 | |
| 3555 | return rc; |
| 3556 | } |
| 3557 | /** |
| 3558 | * ata_qc_prep - Prepare taskfile for submission |
| 3559 | * @qc: Metadata associated with taskfile to be prepared |
| 3560 | * |
| 3561 | * Prepare ATA taskfile for submission. |
| 3562 | * |
| 3563 | * LOCKING: |
| 3564 | * spin_lock_irqsave(host lock) |
| 3565 | */ |
| 3566 | void ata_qc_prep(struct ata_queued_cmd *qc) |
| 3567 | { |
| 3568 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
| 3569 | return; |
| 3570 | |
| 3571 | ata_fill_sg(qc); |
| 3572 | } |
| 3573 | |
/* no-op ->qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
| 3575 | |
| 3576 | /** |
| 3577 | * ata_sg_init_one - Associate command with memory buffer |
| 3578 | * @qc: Command to be associated |
| 3579 | * @buf: Memory buffer |
| 3580 | * @buflen: Length of memory buffer, in bytes. |
| 3581 | * |
| 3582 | * Initialize the data-related elements of queued_cmd @qc |
| 3583 | * to point to a single memory buffer, @buf of byte length @buflen. |
| 3584 | * |
| 3585 | * LOCKING: |
| 3586 | * spin_lock_irqsave(host lock) |
| 3587 | */ |
| 3588 | |
| 3589 | void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) |
| 3590 | { |
| 3591 | qc->flags |= ATA_QCFLAG_SINGLE; |
| 3592 | |
| 3593 | qc->__sg = &qc->sgent; |
| 3594 | qc->n_elem = 1; |
| 3595 | qc->orig_n_elem = 1; |
| 3596 | qc->buf_virt = buf; |
| 3597 | qc->nbytes = buflen; |
| 3598 | |
| 3599 | sg_init_one(&qc->sgent, buf, buflen); |
| 3600 | } |
| 3601 | |
| 3602 | /** |
| 3603 | * ata_sg_init - Associate command with scatter-gather table. |
| 3604 | * @qc: Command to be associated |
| 3605 | * @sg: Scatter-gather table. |
| 3606 | * @n_elem: Number of elements in s/g table. |
| 3607 | * |
| 3608 | * Initialize the data-related elements of queued_cmd @qc |
| 3609 | * to point to a scatter-gather table @sg, containing @n_elem |
| 3610 | * elements. |
| 3611 | * |
| 3612 | * LOCKING: |
| 3613 | * spin_lock_irqsave(host lock) |
| 3614 | */ |
| 3615 | |
| 3616 | void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
| 3617 | unsigned int n_elem) |
| 3618 | { |
| 3619 | qc->flags |= ATA_QCFLAG_SG; |
| 3620 | qc->__sg = sg; |
| 3621 | qc->n_elem = n_elem; |
| 3622 | qc->orig_n_elem = n_elem; |
| 3623 | } |
| 3624 | |
| 3625 | /** |
| 3626 | * ata_sg_setup_one - DMA-map the memory buffer associated with a command. |
| 3627 | * @qc: Command with memory buffer to be mapped. |
| 3628 | * |
| 3629 | * DMA-map the memory buffer associated with queued_cmd @qc. |
| 3630 | * |
| 3631 | * LOCKING: |
| 3632 | * spin_lock_irqsave(host lock) |
| 3633 | * |
| 3634 | * RETURNS: |
| 3635 | * Zero on success, negative on error. |
| 3636 | */ |
| 3637 | |
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* on writes, the trailing bytes go out via the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		/* buffer shorter than 4 bytes: everything is in the pad */
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* nothing left to map -- the pad sg entry carries all the data */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
| 3693 | |
| 3694 | /** |
| 3695 | * ata_sg_setup - DMA-map the scatter-gather table associated with a command. |
| 3696 | * @qc: Command with scatter-gather table to be mapped. |
| 3697 | * |
| 3698 | * DMA-map the scatter-gather table associated with queued_cmd @qc. |
| 3699 | * |
| 3700 | * LOCKING: |
| 3701 | * spin_lock_irqsave(host lock) |
| 3702 | * |
| 3703 | * RETURNS: |
| 3704 | * Zero on success, negative on error. |
| 3705 | * |
| 3706 | */ |
| 3707 | |
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];	/* last element gets padded */
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* source page may be highmem -- use an atomic kmap */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the fully trimmed-away last element from the mapping */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	/* nothing left to map -- the pad sg entry carries all the data */
	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
| 3778 | |
| 3779 | /** |
| 3780 | * swap_buf_le16 - swap halves of 16-bit words in place |
| 3781 | * @buf: Buffer to swap |
| 3782 | * @buf_words: Number of 16-bit words in buffer. |
| 3783 | * |
| 3784 | * Swap halves of 16-bit words if needed to convert from |
| 3785 | * little-endian byte order to native cpu byte order, or |
| 3786 | * vice-versa. |
| 3787 | * |
| 3788 | * LOCKING: |
| 3789 | * Inherited from caller. |
| 3790 | */ |
| 3791 | void swap_buf_le16(u16 *buf, unsigned int buf_words) |
| 3792 | { |
| 3793 | #ifdef __BIG_ENDIAN |
| 3794 | unsigned int i; |
| 3795 | |
| 3796 | for (i = 0; i < buf_words; i++) |
| 3797 | buf[i] = le16_to_cpu(buf[i]); |
| 3798 | #endif /* __BIG_ENDIAN */ |
| 3799 | } |
| 3800 | |
| 3801 | /** |
| 3802 | * ata_data_xfer - Transfer data by PIO |
| 3803 | * @adev: device to target |
| 3804 | * @buf: data buffer |
| 3805 | * @buflen: buffer length |
| 3806 | * @write_data: read/write |
| 3807 | * |
| 3808 | * Transfer data from/to the device data register by PIO. |
| 3809 | * |
| 3810 | * LOCKING: |
| 3811 | * Inherited from caller. |
| 3812 | */ |
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	/* number of whole 16-bit words in the buffer */
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* bounce through an aligned 16-bit word so only one valid
		 * byte is copied to/from the caller's buffer */
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			/* le16_to_cpu keeps the data byte in the low half
			 * of the word as seen by the device register */
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
| 3839 | |
| 3840 | /** |
| 3841 | * ata_data_xfer_noirq - Transfer data by PIO |
| 3842 | * @adev: device to target |
| 3843 | * @buf: data buffer |
| 3844 | * @buflen: buffer length |
| 3845 | * @write_data: read/write |
| 3846 | * |
| 3847 | * Transfer data from/to the device data register by PIO. Do the |
| 3848 | * transfer with interrupts disabled. |
| 3849 | * |
| 3850 | * LOCKING: |
| 3851 | * Inherited from caller. |
| 3852 | */ |
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* Keep local interrupts off for the duration of the PIO transfer,
	 * then restore the previous interrupt state. */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
| 3861 | |
| 3862 | |
| 3863 | /** |
| 3864 | * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. |
| 3865 | * @qc: Command on going |
| 3866 | * |
| 3867 | * Transfer ATA_SECT_SIZE of data from/to the ATA device. |
| 3868 | * |
| 3869 | * LOCKING: |
| 3870 | * Inherited from caller. |
| 3871 | */ |
| 3872 | |
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector of the command — next state is HSM_ST_LAST */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* irqs stay off so the atomic kmap slot (KM_IRQ0) is safe */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped — no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* advance the transfer cursor by one sector */
	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* current sg entry exhausted — step to the next one */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
| 3919 | |
| 3920 | /** |
| 3921 | * ata_pio_sectors - Transfer one or many 512-byte sectors. |
| 3922 | * @qc: Command on going |
| 3923 | * |
| 3924 | * Transfer one or many ATA_SECT_SIZE of data from/to the |
| 3925 | * ATA device for the DRQ request. |
| 3926 | * |
| 3927 | * LOCKING: |
| 3928 | * Inherited from caller. |
| 3929 | */ |
| 3930 | |
| 3931 | static void ata_pio_sectors(struct ata_queued_cmd *qc) |
| 3932 | { |
| 3933 | if (is_multi_taskfile(&qc->tf)) { |
| 3934 | /* READ/WRITE MULTIPLE */ |
| 3935 | unsigned int nsect; |
| 3936 | |
| 3937 | WARN_ON(qc->dev->multi_count == 0); |
| 3938 | |
| 3939 | nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE, |
| 3940 | qc->dev->multi_count); |
| 3941 | while (nsect--) |
| 3942 | ata_pio_sector(qc); |
| 3943 | } else |
| 3944 | ata_pio_sector(qc); |
| 3945 | } |
| 3946 | |
| 3947 | /** |
| 3948 | * atapi_send_cdb - Write CDB bytes to hardware |
| 3949 | * @ap: Port to which ATAPI device is attached. |
| 3950 | * @qc: Taskfile currently active |
| 3951 | * |
| 3952 | * When device has indicated its readiness to accept |
| 3953 | * a CDB, this function is called. Send the CDB. |
| 3954 | * |
| 3955 | * LOCKING: |
| 3956 | * caller. |
| 3957 | */ |
| 3958 | |
| 3959 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) |
| 3960 | { |
| 3961 | /* send SCSI cdb */ |
| 3962 | DPRINTK("send cdb\n"); |
| 3963 | WARN_ON(qc->dev->cdb_len < 12); |
| 3964 | |
| 3965 | ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); |
| 3966 | ata_altstatus(ap); /* flush */ |
| 3967 | |
| 3968 | switch (qc->tf.protocol) { |
| 3969 | case ATA_PROT_ATAPI: |
| 3970 | ap->hsm_task_state = HSM_ST; |
| 3971 | break; |
| 3972 | case ATA_PROT_ATAPI_NODATA: |
| 3973 | ap->hsm_task_state = HSM_ST_LAST; |
| 3974 | break; |
| 3975 | case ATA_PROT_ATAPI_DMA: |
| 3976 | ap->hsm_task_state = HSM_ST_LAST; |
| 3977 | /* initiate bmdma */ |
| 3978 | ap->ops->bmdma_start(qc); |
| 3979 | break; |
| 3980 | } |
| 3981 | } |
| 3982 | |
| 3983 | /** |
| 3984 | * __atapi_pio_bytes - Transfer data from/to the ATAPI device. |
| 3985 | * @qc: Command on going |
| 3986 | * @bytes: number of bytes |
| 3987 | * |
 *	Transfer data from/to the ATAPI device.
| 3989 | * |
| 3990 | * LOCKING: |
| 3991 | * Inherited from caller. |
| 3992 | * |
| 3993 | */ |
| 3994 | |
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk reaches (or passes) the expected total — last state next */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* irqs off so the atomic kmap slot (KM_IRQ0) is safe */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance cursors by the amount actually moved */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg entry exhausted — step to the next one */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* more of this DRQ chunk remains — continue with the next sg entry */
	if (bytes)
		goto next_sg;
}
| 4077 | |
| 4078 | /** |
| 4079 | * atapi_pio_bytes - Transfer data from/to the ATAPI device. |
| 4080 | * @qc: Command on going |
| 4081 | * |
 *	Transfer data from/to the ATAPI device.
| 4083 | * |
| 4084 | * LOCKING: |
| 4085 | * Inherited from caller. |
| 4086 | */ |
| 4087 | |
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	/* interrupt reason lives in the sector-count register; the byte
	 * count the device wants to move is split across lbam/lbah */
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	/* ireason I/O bit set means device-to-host (read) */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* bad ireason/direction — flag an HSM violation for EH */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
| 4127 | |
| 4128 | /** |
| 4129 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. |
| 4130 | * @ap: the target ata_port |
| 4131 | * @qc: qc on going |
| 4132 | * |
| 4133 | * RETURNS: |
| 4134 | * 1 if ok in workqueue, 0 otherwise. |
| 4135 | */ |
| 4136 | |
| 4137 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) |
| 4138 | { |
| 4139 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
| 4140 | return 1; |
| 4141 | |
| 4142 | if (ap->hsm_task_state == HSM_ST_FIRST) { |
| 4143 | if (qc->tf.protocol == ATA_PROT_PIO && |
| 4144 | (qc->tf.flags & ATA_TFLAG_WRITE)) |
| 4145 | return 1; |
| 4146 | |
| 4147 | if (is_atapi_taskfile(&qc->tf) && |
| 4148 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
| 4149 | return 1; |
| 4150 | } |
| 4151 | |
| 4152 | return 0; |
| 4153 | } |
| 4154 | |
| 4155 | /** |
| 4156 | * ata_hsm_qc_complete - finish a qc running on standard HSM |
| 4157 | * @qc: Command to complete |
| 4158 | * @in_wq: 1 if called from workqueue, 0 otherwise |
| 4159 | * |
| 4160 | * Finish @qc which is running on standard HSM. |
| 4161 | * |
| 4162 | * LOCKING: |
| 4163 | * If @in_wq is zero, spin_lock_irqsave(host lock). |
| 4164 | * Otherwise, none on entry and grabs host lock. |
| 4165 | */ |
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: HSM violations freeze the port for EH */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable the port irq that the
					 * polled path had left off */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* already under host lock (irq context) */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete; EH synchronization is the
		 * caller's problem */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
| 4206 | |
| 4207 | /** |
| 4208 | * ata_hsm_move - move the HSM to the next state. |
| 4209 | * @ap: the target ata_port |
| 4210 | * @qc: qc on going |
| 4211 | * @status: current device status |
| 4212 | * @in_wq: 1 if called from workqueue, 0 otherwise |
| 4213 | * |
| 4214 | * RETURNS: |
| 4215 | * 1 when poll next status needed, 0 otherwise. |
| 4216 | */ |
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
			       ap->id, status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
				       ap->id, status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				/* device still busy or asking for data
				 * after the junk transfer — HSM violation */
				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
| 4443 | |
/**
 *	ata_pio_task - workqueue entry for polled/PIO HSM processing
 *	@work: embedded work struct of the target ata_port
 *
 *	Wait for the device to go non-busy, then drive the HSM via
 *	ata_hsm_move().  If the device stays busy, requeue itself as
 *	delayed work instead of spinning.
 *
 *	LOCKING:
 *	None; runs in workqueue context and takes the host lock as
 *	needed through ata_hsm_move().
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy — come back later instead of spinning */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
| 4481 | |
| 4482 | /** |
| 4483 | * ata_qc_new - Request an available ATA command, for queueing |
| 4484 | * @ap: Port associated with device @dev |
| 4485 | * @dev: Device from whom we request an available command structure |
| 4486 | * |
| 4487 | * LOCKING: |
| 4488 | * None. |
| 4489 | */ |
| 4490 | |
| 4491 | static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) |
| 4492 | { |
| 4493 | struct ata_queued_cmd *qc = NULL; |
| 4494 | unsigned int i; |
| 4495 | |
| 4496 | /* no command while frozen */ |
| 4497 | if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) |
| 4498 | return NULL; |
| 4499 | |
| 4500 | /* the last tag is reserved for internal command. */ |
| 4501 | for (i = 0; i < ATA_MAX_QUEUE - 1; i++) |
| 4502 | if (!test_and_set_bit(i, &ap->qc_allocated)) { |
| 4503 | qc = __ata_qc_from_tag(ap, i); |
| 4504 | break; |
| 4505 | } |
| 4506 | |
| 4507 | if (qc) |
| 4508 | qc->tag = i; |
| 4509 | |
| 4510 | return qc; |
| 4511 | } |
| 4512 | |
| 4513 | /** |
| 4514 | * ata_qc_new_init - Request an available ATA command, and initialize it |
| 4515 | * @dev: Device from whom we request an available command structure |
| 4516 | * |
| 4517 | * LOCKING: |
| 4518 | * None. |
| 4519 | */ |
| 4520 | |
| 4521 | struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) |
| 4522 | { |
| 4523 | struct ata_port *ap = dev->ap; |
| 4524 | struct ata_queued_cmd *qc; |
| 4525 | |
| 4526 | qc = ata_qc_new(ap); |
| 4527 | if (qc) { |
| 4528 | qc->scsicmd = NULL; |
| 4529 | qc->ap = ap; |
| 4530 | qc->dev = dev; |
| 4531 | |
| 4532 | ata_qc_reinit(qc); |
| 4533 | } |
| 4534 | |
| 4535 | return qc; |
| 4536 | } |
| 4537 | |
| 4538 | /** |
| 4539 | * ata_qc_free - free unused ata_queued_cmd |
| 4540 | * @qc: Command to complete |
| 4541 | * |
| 4542 | * Designed to free unused ata_queued_cmd object |
| 4543 | * in case something prevents using it. |
| 4544 | * |
| 4545 | * LOCKING: |
| 4546 | * spin_lock_irqsave(host lock) |
| 4547 | */ |
| 4548 | void ata_qc_free(struct ata_queued_cmd *qc) |
| 4549 | { |
| 4550 | struct ata_port *ap = qc->ap; |
| 4551 | unsigned int tag; |
| 4552 | |
| 4553 | WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
| 4554 | |
| 4555 | qc->flags = 0; |
| 4556 | tag = qc->tag; |
| 4557 | if (likely(ata_tag_valid(tag))) { |
| 4558 | qc->tag = ATA_TAG_POISON; |
| 4559 | clear_bit(tag, &ap->qc_allocated); |
| 4560 | } |
| 4561 | } |
| 4562 | |
/**
 *	__ata_qc_complete - core qc completion: unmap, deactivate, callback
 *	@qc: Command to complete
 *
 *	Unmaps DMA (if mapped), clears the command's active state in the
 *	port bookkeeping, then invokes the completion callback.
 *
 *	NOTE(review): qc->ap is dereferenced in the initializer before the
 *	WARN_ON(qc == NULL) diagnostic below can fire — confirm callers
 *	never pass NULL.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
| 4589 | |
| 4590 | static void fill_result_tf(struct ata_queued_cmd *qc) |
| 4591 | { |
| 4592 | struct ata_port *ap = qc->ap; |
| 4593 | |
| 4594 | ap->ops->tf_read(ap, &qc->result_tf); |
| 4595 | qc->result_tf.flags = qc->tf.flags; |
| 4596 | } |
| 4597 | |
| 4598 | /** |
| 4599 | * ata_qc_complete - Complete an active ATA command |
 *	@qc: Command to complete
| 4602 | * |
| 4603 | * Indicate to the mid and upper layers that an ATA |
| 4604 | * command has completed, with either an ok or not-ok status. |
| 4605 | * |
| 4606 | * LOCKING: |
| 4607 | * spin_lock_irqsave(host lock) |
| 4608 | */ |
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new-style EH path */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal-tag failures complete normally; EH for
			 * them is driven by the internal-command machinery */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old-style EH path: drop completion if EH already owns it */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
| 4657 | |
| 4658 | /** |
| 4659 | * ata_qc_complete_multiple - Complete multiple qcs successfully |
| 4660 | * @ap: port in question |
| 4661 | * @qc_active: new qc_active mask |
| 4662 | * @finish_qc: LLDD callback invoked before completing a qc |
| 4663 | * |
| 4664 | * Complete in-flight commands. This functions is meant to be |
| 4665 | * called from low-level driver's interrupt routine to complete |
| 4666 | * requests normally. ap->qc_active and @qc_active is compared |
| 4667 | * and commands are completed accordingly. |
| 4668 | * |
| 4669 | * LOCKING: |
| 4670 | * spin_lock_irqsave(host lock) |
| 4671 | * |
| 4672 | * RETURNS: |
| 4673 | * Number of completed commands on success, -errno otherwise. |
| 4674 | */ |
| 4675 | int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, |
| 4676 | void (*finish_qc)(struct ata_queued_cmd *)) |
| 4677 | { |
| 4678 | int nr_done = 0; |
| 4679 | u32 done_mask; |
| 4680 | int i; |
| 4681 | |
| 4682 | done_mask = ap->qc_active ^ qc_active; |
| 4683 | |
| 4684 | if (unlikely(done_mask & qc_active)) { |
| 4685 | ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " |
| 4686 | "(%08x->%08x)\n", ap->qc_active, qc_active); |
| 4687 | return -EINVAL; |
| 4688 | } |
| 4689 | |
| 4690 | for (i = 0; i < ATA_MAX_QUEUE; i++) { |
| 4691 | struct ata_queued_cmd *qc; |
| 4692 | |
| 4693 | if (!(done_mask & (1 << i))) |
| 4694 | continue; |
| 4695 | |
| 4696 | if ((qc = ata_qc_from_tag(ap, i))) { |
| 4697 | if (finish_qc) |
| 4698 | finish_qc(qc); |
| 4699 | ata_qc_complete(qc); |
| 4700 | nr_done++; |
| 4701 | } |
| 4702 | } |
| 4703 | |
| 4704 | return nr_done; |
| 4705 | } |
| 4706 | |
| 4707 | static inline int ata_should_dma_map(struct ata_queued_cmd *qc) |
| 4708 | { |
| 4709 | struct ata_port *ap = qc->ap; |
| 4710 | |
| 4711 | switch (qc->tf.protocol) { |
| 4712 | case ATA_PROT_NCQ: |
| 4713 | case ATA_PROT_DMA: |
| 4714 | case ATA_PROT_ATAPI_DMA: |
| 4715 | return 1; |
| 4716 | |
| 4717 | case ATA_PROT_ATAPI: |
| 4718 | case ATA_PROT_PIO: |
| 4719 | if (ap->flags & ATA_FLAG_PIO_DMA) |
| 4720 | return 1; |
| 4721 | |
| 4722 | /* fall through */ |
| 4723 | |
| 4724 | default: |
| 4725 | return 0; |
| 4726 | } |
| 4727 | |
| 4728 | /* never reached */ |
| 4729 | } |
| 4730 | |
| 4731 | /** |
| 4732 | * ata_qc_issue - issue taskfile to device |
| 4733 | * @qc: command to issue to device |
| 4734 | * |
| 4735 | * Prepare an ATA command to submission to device. |
| 4736 | * This includes mapping the data into a DMA-able |
| 4737 | * area, filling in the S/G table, and finally |
| 4738 | * writing the taskfile to hardware, starting the command. |
| 4739 | * |
| 4740 | * LOCKING: |
| 4741 | * spin_lock_irqsave(host lock) |
| 4742 | */ |
| 4743 | void ata_qc_issue(struct ata_queued_cmd *qc) |
| 4744 | { |
| 4745 | struct ata_port *ap = qc->ap; |
| 4746 | |
| 4747 | /* Make sure only one non-NCQ command is outstanding. The |
| 4748 | * check is skipped for old EH because it reuses active qc to |
| 4749 | * request ATAPI sense. |
| 4750 | */ |
| 4751 | WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag)); |
| 4752 | |
| 4753 | if (qc->tf.protocol == ATA_PROT_NCQ) { |
| 4754 | WARN_ON(ap->sactive & (1 << qc->tag)); |
| 4755 | ap->sactive |= 1 << qc->tag; |
| 4756 | } else { |
| 4757 | WARN_ON(ap->sactive); |
| 4758 | ap->active_tag = qc->tag; |
| 4759 | } |
| 4760 | |
| 4761 | qc->flags |= ATA_QCFLAG_ACTIVE; |
| 4762 | ap->qc_active |= 1 << qc->tag; |
| 4763 | |
| 4764 | if (ata_should_dma_map(qc)) { |
| 4765 | if (qc->flags & ATA_QCFLAG_SG) { |
| 4766 | if (ata_sg_setup(qc)) |
| 4767 | goto sg_err; |
| 4768 | } else if (qc->flags & ATA_QCFLAG_SINGLE) { |
| 4769 | if (ata_sg_setup_one(qc)) |
| 4770 | goto sg_err; |
| 4771 | } |
| 4772 | } else { |
| 4773 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
| 4774 | } |
| 4775 | |
| 4776 | ap->ops->qc_prep(qc); |
| 4777 | |
| 4778 | qc->err_mask |= ap->ops->qc_issue(qc); |
| 4779 | if (unlikely(qc->err_mask)) |
| 4780 | goto err; |
| 4781 | return; |
| 4782 | |
| 4783 | sg_err: |
| 4784 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
| 4785 | qc->err_mask |= AC_ERR_SYSTEM; |
| 4786 | err: |
| 4787 | ata_qc_complete(qc); |
| 4788 | } |
| 4789 | |
| 4790 | /** |
| 4791 | * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner |
| 4792 | * @qc: command to issue to device |
| 4793 | * |
| 4794 | * Using various libata functions and hooks, this function |
| 4795 | * starts an ATA command. ATA commands are grouped into |
| 4796 | * classes called "protocols", and issuing each type of protocol |
| 4797 | * is slightly different. |
| 4798 | * |
| 4799 | * May be used as the qc_issue() entry in ata_port_operations. |
| 4800 | * |
| 4801 | * LOCKING: |
| 4802 | * spin_lock_irqsave(host lock) |
| 4803 | * |
| 4804 | * RETURNS: |
| 4805 | * Zero on success, AC_ERR_* mask on failure |
| 4806 | */ |
| 4807 | |
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode.  Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol has its own issue sequence
	 * and initial HSM state
	 */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polling mode: completion is driven by ata_pio_task */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		/* unknown protocol -- programming error in the caller */
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
| 4929 | |
| 4930 | /** |
| 4931 | * ata_host_intr - Handle host interrupt for given (port, task) |
| 4932 | * @ap: Port on which interrupt arrived (possibly...) |
| 4933 | * @qc: Taskfile currently active in engine |
| 4934 | * |
| 4935 | * Handle host interrupt for given queued command. Currently, |
| 4936 | * only DMA interrupts are handled. All other commands are |
| 4937 | * handled via polling with interrupts disabled (nIEN bit). |
| 4938 | * |
| 4939 | * LOCKING: |
| 4940 | * spin_lock_irqsave(host lock) |
| 4941 | * |
| 4942 | * RETURNS: |
| 4943 | * One if interrupt was handled, zero if not (shared irq). |
| 4944 | */ |
| 4945 | |
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		/* any other HSM state means this irq wasn't for us */
		goto idle_irq;
	}

	/* check altstatus -- reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with the status we latched */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	/* not ours (or spurious) -- count it for diagnostics */
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
| 5029 | |
| 5030 | /** |
| 5031 | * ata_interrupt - Default ATA host interrupt handler |
| 5032 | * @irq: irq line (unused) |
| 5033 | * @dev_instance: pointer to our ata_host information structure |
| 5034 | * |
| 5035 | * Default interrupt handler for PCI IDE devices. Calls |
| 5036 | * ata_host_intr() for each port that is not disabled. |
| 5037 | * |
| 5038 | * LOCKING: |
| 5039 | * Obtains host lock during operation. |
| 5040 | * |
| 5041 | * RETURNS: |
| 5042 | * IRQ_NONE or IRQ_HANDLED. |
| 5043 | */ |
| 5044 | |
| 5045 | irqreturn_t ata_interrupt (int irq, void *dev_instance) |
| 5046 | { |
| 5047 | struct ata_host *host = dev_instance; |
| 5048 | unsigned int i; |
| 5049 | unsigned int handled = 0; |
| 5050 | unsigned long flags; |
| 5051 | |
| 5052 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ |
| 5053 | spin_lock_irqsave(&host->lock, flags); |
| 5054 | |
| 5055 | for (i = 0; i < host->n_ports; i++) { |
| 5056 | struct ata_port *ap; |
| 5057 | |
| 5058 | ap = host->ports[i]; |
| 5059 | if (ap && |
| 5060 | !(ap->flags & ATA_FLAG_DISABLED)) { |
| 5061 | struct ata_queued_cmd *qc; |
| 5062 | |
| 5063 | qc = ata_qc_from_tag(ap, ap->active_tag); |
| 5064 | if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && |
| 5065 | (qc->flags & ATA_QCFLAG_ACTIVE)) |
| 5066 | handled |= ata_host_intr(ap, qc); |
| 5067 | } |
| 5068 | } |
| 5069 | |
| 5070 | spin_unlock_irqrestore(&host->lock, flags); |
| 5071 | |
| 5072 | return IRQ_RETVAL(handled); |
| 5073 | } |
| 5074 | |
| 5075 | /** |
| 5076 | * sata_scr_valid - test whether SCRs are accessible |
| 5077 | * @ap: ATA port to test SCR accessibility for |
| 5078 | * |
| 5079 | * Test whether SCRs are accessible for @ap. |
| 5080 | * |
| 5081 | * LOCKING: |
| 5082 | * None. |
| 5083 | * |
| 5084 | * RETURNS: |
| 5085 | * 1 if SCRs are accessible, 0 otherwise. |
| 5086 | */ |
| 5087 | int sata_scr_valid(struct ata_port *ap) |
| 5088 | { |
| 5089 | return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read; |
| 5090 | } |
| 5091 | |
| 5092 | /** |
| 5093 | * sata_scr_read - read SCR register of the specified port |
| 5094 | * @ap: ATA port to read SCR for |
| 5095 | * @reg: SCR to read |
| 5096 | * @val: Place to store read value |
| 5097 | * |
| 5098 | * Read SCR register @reg of @ap into *@val. This function is |
| 5099 | * guaranteed to succeed if the cable type of the port is SATA |
| 5100 | * and the port implements ->scr_read. |
| 5101 | * |
| 5102 | * LOCKING: |
| 5103 | * None. |
| 5104 | * |
| 5105 | * RETURNS: |
| 5106 | * 0 on success, negative errno on failure. |
| 5107 | */ |
| 5108 | int sata_scr_read(struct ata_port *ap, int reg, u32 *val) |
| 5109 | { |
| 5110 | if (sata_scr_valid(ap)) { |
| 5111 | *val = ap->ops->scr_read(ap, reg); |
| 5112 | return 0; |
| 5113 | } |
| 5114 | return -EOPNOTSUPP; |
| 5115 | } |
| 5116 | |
| 5117 | /** |
| 5118 | * sata_scr_write - write SCR register of the specified port |
| 5119 | * @ap: ATA port to write SCR for |
| 5120 | * @reg: SCR to write |
| 5121 | * @val: value to write |
| 5122 | * |
| 5123 | * Write @val to SCR register @reg of @ap. This function is |
| 5124 | * guaranteed to succeed if the cable type of the port is SATA |
| 5125 | * and the port implements ->scr_read. |
| 5126 | * |
| 5127 | * LOCKING: |
| 5128 | * None. |
| 5129 | * |
| 5130 | * RETURNS: |
| 5131 | * 0 on success, negative errno on failure. |
| 5132 | */ |
| 5133 | int sata_scr_write(struct ata_port *ap, int reg, u32 val) |
| 5134 | { |
| 5135 | if (sata_scr_valid(ap)) { |
| 5136 | ap->ops->scr_write(ap, reg, val); |
| 5137 | return 0; |
| 5138 | } |
| 5139 | return -EOPNOTSUPP; |
| 5140 | } |
| 5141 | |
| 5142 | /** |
| 5143 | * sata_scr_write_flush - write SCR register of the specified port and flush |
| 5144 | * @ap: ATA port to write SCR for |
| 5145 | * @reg: SCR to write |
| 5146 | * @val: value to write |
| 5147 | * |
| 5148 | * This function is identical to sata_scr_write() except that this |
| 5149 | * function performs flush after writing to the register. |
| 5150 | * |
| 5151 | * LOCKING: |
| 5152 | * None. |
| 5153 | * |
| 5154 | * RETURNS: |
| 5155 | * 0 on success, negative errno on failure. |
| 5156 | */ |
| 5157 | int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val) |
| 5158 | { |
| 5159 | if (sata_scr_valid(ap)) { |
| 5160 | ap->ops->scr_write(ap, reg, val); |
| 5161 | ap->ops->scr_read(ap, reg); |
| 5162 | return 0; |
| 5163 | } |
| 5164 | return -EOPNOTSUPP; |
| 5165 | } |
| 5166 | |
| 5167 | /** |
| 5168 | * ata_port_online - test whether the given port is online |
| 5169 | * @ap: ATA port to test |
| 5170 | * |
| 5171 | * Test whether @ap is online. Note that this function returns 0 |
| 5172 | * if online status of @ap cannot be obtained, so |
| 5173 | * ata_port_online(ap) != !ata_port_offline(ap). |
| 5174 | * |
| 5175 | * LOCKING: |
| 5176 | * None. |
| 5177 | * |
| 5178 | * RETURNS: |
| 5179 | * 1 if the port online status is available and online. |
| 5180 | */ |
| 5181 | int ata_port_online(struct ata_port *ap) |
| 5182 | { |
| 5183 | u32 sstatus; |
| 5184 | |
| 5185 | if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3) |
| 5186 | return 1; |
| 5187 | return 0; |
| 5188 | } |
| 5189 | |
| 5190 | /** |
| 5191 | * ata_port_offline - test whether the given port is offline |
| 5192 | * @ap: ATA port to test |
| 5193 | * |
| 5194 | * Test whether @ap is offline. Note that this function returns |
| 5195 | * 0 if offline status of @ap cannot be obtained, so |
| 5196 | * ata_port_online(ap) != !ata_port_offline(ap). |
| 5197 | * |
| 5198 | * LOCKING: |
| 5199 | * None. |
| 5200 | * |
| 5201 | * RETURNS: |
| 5202 | * 1 if the port offline status is available and offline. |
| 5203 | */ |
| 5204 | int ata_port_offline(struct ata_port *ap) |
| 5205 | { |
| 5206 | u32 sstatus; |
| 5207 | |
| 5208 | if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3) |
| 5209 | return 1; |
| 5210 | return 0; |
| 5211 | } |
| 5212 | |
| 5213 | int ata_flush_cache(struct ata_device *dev) |
| 5214 | { |
| 5215 | unsigned int err_mask; |
| 5216 | u8 cmd; |
| 5217 | |
| 5218 | if (!ata_try_flush_cache(dev)) |
| 5219 | return 0; |
| 5220 | |
| 5221 | if (dev->flags & ATA_DFLAG_FLUSH_EXT) |
| 5222 | cmd = ATA_CMD_FLUSH_EXT; |
| 5223 | else |
| 5224 | cmd = ATA_CMD_FLUSH; |
| 5225 | |
| 5226 | err_mask = ata_do_simple_cmd(dev, cmd); |
| 5227 | if (err_mask) { |
| 5228 | ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); |
| 5229 | return -EIO; |
| 5230 | } |
| 5231 | |
| 5232 | return 0; |
| 5233 | } |
| 5234 | |
/* Request a PM operation (@mesg/@action/@ehi_flags) from each port's EH.
 * If @wait is non-zero, waits for EH to finish on each port and returns
 * the first non-zero result; otherwise the requests run asynchronously.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* pm_result points at our stack variable; this is
			 * safe only because we wait for EH below before
			 * returning.
			 */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			/* rc was filled in by EH via ap->pm_result */
			if (rc)
				return rc;
		}
	}

	return 0;
}
| 5281 | |
| 5282 | /** |
| 5283 | * ata_host_suspend - suspend host |
| 5284 | * @host: host to suspend |
| 5285 | * @mesg: PM message |
| 5286 | * |
| 5287 | * Suspend @host. Actual operation is performed by EH. This |
| 5288 | * function requests EH to perform PM operations and waits for EH |
| 5289 | * to finish. |
| 5290 | * |
| 5291 | * LOCKING: |
| 5292 | * Kernel thread context (may sleep). |
| 5293 | * |
| 5294 | * RETURNS: |
| 5295 | * 0 on success, -errno on failure. |
| 5296 | */ |
| 5297 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) |
| 5298 | { |
| 5299 | int i, j, rc; |
| 5300 | |
| 5301 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
| 5302 | if (rc) |
| 5303 | goto fail; |
| 5304 | |
| 5305 | /* EH is quiescent now. Fail if we have any ready device. |
| 5306 | * This happens if hotplug occurs between completion of device |
| 5307 | * suspension and here. |
| 5308 | */ |
| 5309 | for (i = 0; i < host->n_ports; i++) { |
| 5310 | struct ata_port *ap = host->ports[i]; |
| 5311 | |
| 5312 | for (j = 0; j < ATA_MAX_DEVICES; j++) { |
| 5313 | struct ata_device *dev = &ap->device[j]; |
| 5314 | |
| 5315 | if (ata_dev_ready(dev)) { |
| 5316 | ata_port_printk(ap, KERN_WARNING, |
| 5317 | "suspend failed, device %d " |
| 5318 | "still active\n", dev->devno); |
| 5319 | rc = -EBUSY; |
| 5320 | goto fail; |
| 5321 | } |
| 5322 | } |
| 5323 | } |
| 5324 | |
| 5325 | host->dev->power.power_state = mesg; |
| 5326 | return 0; |
| 5327 | |
| 5328 | fail: |
| 5329 | ata_host_resume(host); |
| 5330 | return rc; |
| 5331 | } |
| 5332 | |
| 5333 | /** |
| 5334 | * ata_host_resume - resume host |
| 5335 | * @host: host to resume |
| 5336 | * |
| 5337 | * Resume @host. Actual operation is performed by EH. This |
| 5338 | * function requests EH to perform PM operations and returns. |
 *	Note that all resume operations are performed in parallel.
| 5340 | * |
| 5341 | * LOCKING: |
| 5342 | * Kernel thread context (may sleep). |
| 5343 | */ |
void ata_host_resume(struct ata_host *host)
{
	/* fire-and-forget: wait == 0, so all ports resume in parallel */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
| 5350 | |
| 5351 | /** |
| 5352 | * ata_port_start - Set port up for dma. |
| 5353 | * @ap: Port to initialize |
| 5354 | * |
| 5355 | * Called just after data structures for each port are |
| 5356 | * initialized. Allocates space for PRD table. |
| 5357 | * |
| 5358 | * May be used as the port_start() entry in ata_port_operations. |
| 5359 | * |
| 5360 | * LOCKING: |
| 5361 | * Inherited from caller. |
| 5362 | */ |
| 5363 | int ata_port_start(struct ata_port *ap) |
| 5364 | { |
| 5365 | struct device *dev = ap->dev; |
| 5366 | int rc; |
| 5367 | |
| 5368 | ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, |
| 5369 | GFP_KERNEL); |
| 5370 | if (!ap->prd) |
| 5371 | return -ENOMEM; |
| 5372 | |
| 5373 | rc = ata_pad_alloc(ap, dev); |
| 5374 | if (rc) |
| 5375 | return rc; |
| 5376 | |
| 5377 | DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, |
| 5378 | (unsigned long long)ap->prd_dma); |
| 5379 | return 0; |
| 5380 | } |
| 5381 | |
| 5382 | /** |
| 5383 | * ata_dev_init - Initialize an ata_device structure |
| 5384 | * @dev: Device structure to initialize |
| 5385 | * |
| 5386 | * Initialize @dev in preparation for probing. |
| 5387 | * |
| 5388 | * LOCKING: |
| 5389 | * Inherited from caller. |
| 5390 | */ |
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* clear everything past ATA_DEVICE_CLEAR_OFFSET; the fields
	 * before the offset (e.g. ap, devno) survive re-init
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start with all transfer modes allowed; probing narrows these */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
| 5413 | |
| 5414 | /** |
| 5415 | * ata_port_init - Initialize an ata_port structure |
| 5416 | * @ap: Structure to initialize |
| 5417 | * @host: Collection of hosts to which @ap belongs |
| 5418 | * @ent: Probe information provided by low-level driver |
| 5419 | * @port_no: Port number associated with this ata_port |
| 5420 | * |
| 5421 | * Initialize a new ata_port structure. |
| 5422 | * |
| 5423 | * LOCKING: |
| 5424 | * Inherited from caller. |
| 5425 | */ |
| 5426 | void ata_port_init(struct ata_port *ap, struct ata_host *host, |
| 5427 | const struct ata_probe_ent *ent, unsigned int port_no) |
| 5428 | { |
| 5429 | unsigned int i; |
| 5430 | |
| 5431 | ap->lock = &host->lock; |
| 5432 | ap->flags = ATA_FLAG_DISABLED; |
| 5433 | ap->id = ata_unique_id++; |
| 5434 | ap->ctl = ATA_DEVCTL_OBS; |
| 5435 | ap->host = host; |
| 5436 | ap->dev = ent->dev; |
| 5437 | ap->port_no = port_no; |
| 5438 | if (port_no == 1 && ent->pinfo2) { |
| 5439 | ap->pio_mask = ent->pinfo2->pio_mask; |
| 5440 | ap->mwdma_mask = ent->pinfo2->mwdma_mask; |
| 5441 | ap->udma_mask = ent->pinfo2->udma_mask; |
| 5442 | ap->flags |= ent->pinfo2->flags; |
| 5443 | ap->ops = ent->pinfo2->port_ops; |
| 5444 | } else { |
| 5445 | ap->pio_mask = ent->pio_mask; |
| 5446 | ap->mwdma_mask = ent->mwdma_mask; |
| 5447 | ap->udma_mask = ent->udma_mask; |
| 5448 | ap->flags |= ent->port_flags; |
| 5449 | ap->ops = ent->port_ops; |
| 5450 | } |
| 5451 | ap->hw_sata_spd_limit = UINT_MAX; |
| 5452 | ap->active_tag = ATA_TAG_POISON; |
| 5453 | ap->last_ctl = 0xFF; |
| 5454 | |
| 5455 | #if defined(ATA_VERBOSE_DEBUG) |
| 5456 | /* turn on all debugging levels */ |
| 5457 | ap->msg_enable = 0x00FF; |
| 5458 | #elif defined(ATA_DEBUG) |
| 5459 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; |
| 5460 | #else |
| 5461 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
| 5462 | #endif |
| 5463 | |
| 5464 | INIT_DELAYED_WORK(&ap->port_task, NULL); |
| 5465 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
| 5466 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
| 5467 | INIT_LIST_HEAD(&ap->eh_done_q); |
| 5468 | init_waitqueue_head(&ap->eh_wait_q); |
| 5469 | |
| 5470 | /* set cable type */ |
| 5471 | ap->cbl = ATA_CBL_NONE; |
| 5472 | if (ap->flags & ATA_FLAG_SATA) |
| 5473 | ap->cbl = ATA_CBL_SATA; |
| 5474 | |
| 5475 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
| 5476 | struct ata_device *dev = &ap->device[i]; |
| 5477 | dev->ap = ap; |
| 5478 | dev->devno = i; |
| 5479 | ata_dev_init(dev); |
| 5480 | } |
| 5481 | |
| 5482 | #ifdef ATA_IRQ_TRAP |
| 5483 | ap->stats.unhandled_irq = 1; |
| 5484 | ap->stats.idle_irq = 1; |
| 5485 | #endif |
| 5486 | |
| 5487 | memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports)); |
| 5488 | } |
| 5489 | |
| 5490 | /** |
| 5491 | * ata_port_init_shost - Initialize SCSI host associated with ATA port |
| 5492 | * @ap: ATA port to initialize SCSI host for |
| 5493 | * @shost: SCSI host associated with @ap |
| 5494 | * |
| 5495 | * Initialize SCSI host @shost associated with ATA port @ap. |
| 5496 | * |
| 5497 | * LOCKING: |
| 5498 | * Inherited from caller. |
| 5499 | */ |
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	/* cross-link the ATA port and its SCSI host */
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}
| 5510 | |
| 5511 | /** |
| 5512 | * ata_port_add - Attach low-level ATA driver to system |
| 5513 | * @ent: Information provided by low-level driver |
| 5514 | * @host: Collections of ports to which we add |
| 5515 | * @port_no: Port number associated with this host |
| 5516 | * |
| 5517 | * Attach low-level ATA driver to system. |
| 5518 | * |
| 5519 | * LOCKING: |
| 5520 | * PCI/etc. bus probe sem. |
| 5521 | * |
| 5522 | * RETURNS: |
 *	New ata_port on success; NULL on error.
| 5524 | */ |
| 5525 | static struct ata_port * ata_port_add(const struct ata_probe_ent *ent, |
| 5526 | struct ata_host *host, |
| 5527 | unsigned int port_no) |
| 5528 | { |
| 5529 | struct Scsi_Host *shost; |
| 5530 | struct ata_port *ap; |
| 5531 | |
| 5532 | DPRINTK("ENTER\n"); |
| 5533 | |
| 5534 | if (!ent->port_ops->error_handler && |
| 5535 | !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { |
| 5536 | printk(KERN_ERR "ata%u: no reset mechanism available\n", |
| 5537 | port_no); |
| 5538 | return NULL; |
| 5539 | } |
| 5540 | |
| 5541 | shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port)); |
| 5542 | if (!shost) |
| 5543 | return NULL; |
| 5544 | |
| 5545 | shost->transportt = &ata_scsi_transport_template; |
| 5546 | |
| 5547 | ap = ata_shost_to_port(shost); |
| 5548 | |
| 5549 | ata_port_init(ap, host, ent, port_no); |
| 5550 | ata_port_init_shost(ap, shost); |
| 5551 | |
| 5552 | return ap; |
| 5553 | } |
| 5554 | |
| 5555 | static void ata_host_release(struct device *gendev, void *res) |
| 5556 | { |
| 5557 | struct ata_host *host = dev_get_drvdata(gendev); |
| 5558 | int i; |
| 5559 | |
| 5560 | for (i = 0; i < host->n_ports; i++) { |
| 5561 | struct ata_port *ap = host->ports[i]; |
| 5562 | |
| 5563 | if (!ap) |
| 5564 | continue; |
| 5565 | |
| 5566 | if (ap->ops->port_stop) |
| 5567 | ap->ops->port_stop(ap); |
| 5568 | |
| 5569 | scsi_host_put(ap->scsi_host); |
| 5570 | } |
| 5571 | |
| 5572 | if (host->ops->host_stop) |
| 5573 | host->ops->host_stop(host); |
| 5574 | } |
| 5575 | |
| 5576 | /** |
 *	ata_host_init - Initialize a host struct
| 5578 | * @host: host to initialize |
| 5579 | * @dev: device host is attached to |
| 5580 | * @flags: host flags |
| 5581 | * @ops: port_ops |
| 5582 | * |
| 5583 | * LOCKING: |
| 5584 | * PCI/etc. bus probe sem. |
| 5585 | * |
| 5586 | */ |
| 5587 | |
| 5588 | void ata_host_init(struct ata_host *host, struct device *dev, |
| 5589 | unsigned long flags, const struct ata_port_operations *ops) |
| 5590 | { |
| 5591 | spin_lock_init(&host->lock); |
| 5592 | host->dev = dev; |
| 5593 | host->flags = flags; |
| 5594 | host->ops = ops; |
| 5595 | } |
| 5596 | |
| 5597 | /** |
| 5598 | * ata_device_add - Register hardware device with ATA and SCSI layers |
| 5599 | * @ent: Probe information describing hardware device to be registered |
| 5600 | * |
| 5601 | * This function processes the information provided in the probe |
| 5602 | * information struct @ent, allocates the necessary ATA and SCSI |
| 5603 | * host information structures, initializes them, and registers |
| 5604 | * everything with requisite kernel subsystems. |
| 5605 | * |
| 5606 | * This function requests irqs, probes the ATA bus, and probes |
| 5607 | * the SCSI bus. |
| 5608 | * |
| 5609 | * LOCKING: |
| 5610 | * PCI/etc. bus probe sem. |
| 5611 | * |
| 5612 | * RETURNS: |
| 5613 | * Number of ports registered. Zero on error (no ports registered). |
| 5614 | */ |
| 5615 | int ata_device_add(const struct ata_probe_ent *ent) |
| 5616 | { |
| 5617 | unsigned int i; |
| 5618 | struct device *dev = ent->dev; |
| 5619 | struct ata_host *host; |
| 5620 | int rc; |
| 5621 | |
| 5622 | DPRINTK("ENTER\n"); |
| 5623 | |
| 5624 | if (ent->irq == 0) { |
| 5625 | dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n"); |
| 5626 | return 0; |
| 5627 | } |
| 5628 | |
| 5629 | if (!devres_open_group(dev, ata_device_add, GFP_KERNEL)) |
| 5630 | return 0; |
| 5631 | |
| 5632 | /* alloc a container for our list of ATA ports (buses) */ |
| 5633 | host = devres_alloc(ata_host_release, sizeof(struct ata_host) + |
| 5634 | (ent->n_ports * sizeof(void *)), GFP_KERNEL); |
| 5635 | if (!host) |
| 5636 | goto err_out; |
| 5637 | devres_add(dev, host); |
| 5638 | dev_set_drvdata(dev, host); |
| 5639 | |
| 5640 | ata_host_init(host, dev, ent->_host_flags, ent->port_ops); |
| 5641 | host->n_ports = ent->n_ports; |
| 5642 | host->irq = ent->irq; |
| 5643 | host->irq2 = ent->irq2; |
| 5644 | host->iomap = ent->iomap; |
| 5645 | host->private_data = ent->private_data; |
| 5646 | |
| 5647 | /* register each port bound to this device */ |
| 5648 | for (i = 0; i < host->n_ports; i++) { |
| 5649 | struct ata_port *ap; |
| 5650 | unsigned long xfer_mode_mask; |
| 5651 | int irq_line = ent->irq; |
| 5652 | |
| 5653 | ap = ata_port_add(ent, host, i); |
| 5654 | host->ports[i] = ap; |
| 5655 | if (!ap) |
| 5656 | goto err_out; |
| 5657 | |
| 5658 | /* dummy? */ |
| 5659 | if (ent->dummy_port_mask & (1 << i)) { |
| 5660 | ata_port_printk(ap, KERN_INFO, "DUMMY\n"); |
| 5661 | ap->ops = &ata_dummy_port_ops; |
| 5662 | continue; |
| 5663 | } |
| 5664 | |
| 5665 | /* start port */ |
| 5666 | rc = ap->ops->port_start(ap); |
| 5667 | if (rc) { |
| 5668 | host->ports[i] = NULL; |
| 5669 | scsi_host_put(ap->scsi_host); |
| 5670 | goto err_out; |
| 5671 | } |
| 5672 | |
| 5673 | /* Report the secondary IRQ for second channel legacy */ |
| 5674 | if (i == 1 && ent->irq2) |
| 5675 | irq_line = ent->irq2; |
| 5676 | |
| 5677 | xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) | |
| 5678 | (ap->mwdma_mask << ATA_SHIFT_MWDMA) | |
| 5679 | (ap->pio_mask << ATA_SHIFT_PIO); |
| 5680 | |
| 5681 | /* print per-port info to dmesg */ |
| 5682 | ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p " |
| 5683 | "ctl 0x%p bmdma 0x%p irq %d\n", |
| 5684 | ap->flags & ATA_FLAG_SATA ? 'S' : 'P', |
| 5685 | ata_mode_string(xfer_mode_mask), |
| 5686 | ap->ioaddr.cmd_addr, |
| 5687 | ap->ioaddr.ctl_addr, |
| 5688 | ap->ioaddr.bmdma_addr, |
| 5689 | irq_line); |
| 5690 | |
| 5691 | /* freeze port before requesting IRQ */ |
| 5692 | ata_eh_freeze_port(ap); |
| 5693 | } |
| 5694 | |
| 5695 | /* obtain irq, that may be shared between channels */ |
| 5696 | rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler, |
| 5697 | ent->irq_flags, DRV_NAME, host); |
| 5698 | if (rc) { |
| 5699 | dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", |
| 5700 | ent->irq, rc); |
| 5701 | goto err_out; |
| 5702 | } |
| 5703 | |
| 5704 | /* do we have a second IRQ for the other channel, eg legacy mode */ |
| 5705 | if (ent->irq2) { |
| 5706 | /* We will get weird core code crashes later if this is true |
| 5707 | so trap it now */ |
| 5708 | BUG_ON(ent->irq == ent->irq2); |
| 5709 | |
| 5710 | rc = devm_request_irq(dev, ent->irq2, |
| 5711 | ent->port_ops->irq_handler, ent->irq_flags, |
| 5712 | DRV_NAME, host); |
| 5713 | if (rc) { |
| 5714 | dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", |
| 5715 | ent->irq2, rc); |
| 5716 | goto err_out; |
| 5717 | } |
| 5718 | } |
| 5719 | |
| 5720 | /* resource acquisition complete */ |
| 5721 | devres_remove_group(dev, ata_device_add); |
| 5722 | |
| 5723 | /* perform each probe synchronously */ |
| 5724 | DPRINTK("probe begin\n"); |
| 5725 | for (i = 0; i < host->n_ports; i++) { |
| 5726 | struct ata_port *ap = host->ports[i]; |
| 5727 | u32 scontrol; |
| 5728 | int rc; |
| 5729 | |
| 5730 | /* init sata_spd_limit to the current value */ |
| 5731 | if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) { |
| 5732 | int spd = (scontrol >> 4) & 0xf; |
| 5733 | ap->hw_sata_spd_limit &= (1 << spd) - 1; |
| 5734 | } |
| 5735 | ap->sata_spd_limit = ap->hw_sata_spd_limit; |
| 5736 | |
| 5737 | rc = scsi_add_host(ap->scsi_host, dev); |
| 5738 | if (rc) { |
| 5739 | ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n"); |
| 5740 | /* FIXME: do something useful here */ |
| 5741 | /* FIXME: handle unconditional calls to |
| 5742 | * scsi_scan_host and ata_host_remove, below, |
| 5743 | * at the very least |
| 5744 | */ |
| 5745 | } |
| 5746 | |
| 5747 | if (ap->ops->error_handler) { |
| 5748 | struct ata_eh_info *ehi = &ap->eh_info; |
| 5749 | unsigned long flags; |
| 5750 | |
| 5751 | ata_port_probe(ap); |
| 5752 | |
| 5753 | /* kick EH for boot probing */ |
| 5754 | spin_lock_irqsave(ap->lock, flags); |
| 5755 | |
| 5756 | ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1; |
| 5757 | ehi->action |= ATA_EH_SOFTRESET; |
| 5758 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; |
| 5759 | |
| 5760 | ap->pflags |= ATA_PFLAG_LOADING; |
| 5761 | ata_port_schedule_eh(ap); |
| 5762 | |
| 5763 | spin_unlock_irqrestore(ap->lock, flags); |
| 5764 | |
| 5765 | /* wait for EH to finish */ |
| 5766 | ata_port_wait_eh(ap); |
| 5767 | } else { |
| 5768 | DPRINTK("ata%u: bus probe begin\n", ap->id); |
| 5769 | rc = ata_bus_probe(ap); |
| 5770 | DPRINTK("ata%u: bus probe end\n", ap->id); |
| 5771 | |
| 5772 | if (rc) { |
| 5773 | /* FIXME: do something useful here? |
| 5774 | * Current libata behavior will |
| 5775 | * tear down everything when |
| 5776 | * the module is removed |
| 5777 | * or the h/w is unplugged. |
| 5778 | */ |
| 5779 | } |
| 5780 | } |
| 5781 | } |
| 5782 | |
| 5783 | /* probes are done, now scan each port's disk(s) */ |
| 5784 | DPRINTK("host probe begin\n"); |
| 5785 | for (i = 0; i < host->n_ports; i++) { |
| 5786 | struct ata_port *ap = host->ports[i]; |
| 5787 | |
| 5788 | ata_scsi_scan_host(ap); |
| 5789 | } |
| 5790 | |
| 5791 | VPRINTK("EXIT, returning %u\n", ent->n_ports); |
| 5792 | return ent->n_ports; /* success */ |
| 5793 | |
| 5794 | err_out: |
| 5795 | devres_release_group(dev, ata_device_add); |
| 5796 | dev_set_drvdata(dev, NULL); |
| 5797 | VPRINTK("EXIT, returning %d\n", rc); |
| 5798 | return 0; |
| 5799 | } |
| 5800 | |
| 5801 | /** |
| 5802 | * ata_port_detach - Detach ATA port in prepration of device removal |
| 5803 | * @ap: ATA port to be detached |
| 5804 | * |
| 5805 | * Detach all ATA devices and the associated SCSI devices of @ap; |
| 5806 | * then, remove the associated SCSI host. @ap is guaranteed to |
| 5807 | * be quiescent on return from this function. |
| 5808 | * |
| 5809 | * LOCKING: |
| 5810 | * Kernel thread context (may sleep). |
| 5811 | */ |
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers without an error handler skip the EH
	 * shutdown dance and just remove the SCSI host
	 */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	/* flush twice: once for work already running, once for work
	 * the cancelled delayed item may have queued in between
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
| 5858 | |
| 5859 | /** |
| 5860 | * ata_host_detach - Detach all ports of an ATA host |
| 5861 | * @host: Host to detach |
| 5862 | * |
| 5863 | * Detach all ports of @host. |
| 5864 | * |
| 5865 | * LOCKING: |
| 5866 | * Kernel thread context (may sleep). |
| 5867 | */ |
| 5868 | void ata_host_detach(struct ata_host *host) |
| 5869 | { |
| 5870 | int i; |
| 5871 | |
| 5872 | for (i = 0; i < host->n_ports; i++) |
| 5873 | ata_port_detach(host->ports[i]); |
| 5874 | } |
| 5875 | |
| 5876 | struct ata_probe_ent * |
| 5877 | ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) |
| 5878 | { |
| 5879 | struct ata_probe_ent *probe_ent; |
| 5880 | |
| 5881 | /* XXX - the following if can go away once all LLDs are managed */ |
| 5882 | if (!list_empty(&dev->devres_head)) |
| 5883 | probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL); |
| 5884 | else |
| 5885 | probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); |
| 5886 | if (!probe_ent) { |
| 5887 | printk(KERN_ERR DRV_NAME "(%s): out of memory\n", |
| 5888 | kobject_name(&(dev->kobj))); |
| 5889 | return NULL; |
| 5890 | } |
| 5891 | |
| 5892 | INIT_LIST_HEAD(&probe_ent->node); |
| 5893 | probe_ent->dev = dev; |
| 5894 | |
| 5895 | probe_ent->sht = port->sht; |
| 5896 | probe_ent->port_flags = port->flags; |
| 5897 | probe_ent->pio_mask = port->pio_mask; |
| 5898 | probe_ent->mwdma_mask = port->mwdma_mask; |
| 5899 | probe_ent->udma_mask = port->udma_mask; |
| 5900 | probe_ent->port_ops = port->port_ops; |
| 5901 | probe_ent->private_data = port->private_data; |
| 5902 | |
| 5903 | return probe_ent; |
| 5904 | } |
| 5905 | |
| 5906 | /** |
| 5907 | * ata_std_ports - initialize ioaddr with standard port offsets. |
| 5908 | * @ioaddr: IO address structure to be initialized |
| 5909 | * |
| 5910 | * Utility function which initializes data_addr, error_addr, |
| 5911 | * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, |
| 5912 | * device_addr, status_addr, and command_addr to standard offsets |
| 5913 | * relative to cmd_addr. |
| 5914 | * |
| 5915 | * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. |
| 5916 | */ |
| 5917 | |
| 5918 | void ata_std_ports(struct ata_ioports *ioaddr) |
| 5919 | { |
| 5920 | ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; |
| 5921 | ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; |
| 5922 | ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; |
| 5923 | ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; |
| 5924 | ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; |
| 5925 | ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; |
| 5926 | ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; |
| 5927 | ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; |
| 5928 | ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; |
| 5929 | ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; |
| 5930 | } |
| 5931 | |
| 5932 | |
| 5933 | #ifdef CONFIG_PCI |
| 5934 | |
| 5935 | /** |
| 5936 | * ata_pci_remove_one - PCI layer callback for device removal |
| 5937 | * @pdev: PCI device that was removed |
| 5938 | * |
| 5939 | * PCI layer indicates to libata via this hook that hot-unplug or |
| 5940 | * module unload event has occurred. Detach all ports. Resource |
| 5941 | * release is handled via devres. |
| 5942 | * |
| 5943 | * LOCKING: |
| 5944 | * Inherited from PCI layer (may sleep). |
| 5945 | */ |
| 5946 | void ata_pci_remove_one(struct pci_dev *pdev) |
| 5947 | { |
| 5948 | struct device *dev = pci_dev_to_dev(pdev); |
| 5949 | struct ata_host *host = dev_get_drvdata(dev); |
| 5950 | |
| 5951 | ata_host_detach(host); |
| 5952 | } |
| 5953 | |
| 5954 | /* move to PCI subsystem */ |
| 5955 | int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) |
| 5956 | { |
| 5957 | unsigned long tmp = 0; |
| 5958 | |
| 5959 | switch (bits->width) { |
| 5960 | case 1: { |
| 5961 | u8 tmp8 = 0; |
| 5962 | pci_read_config_byte(pdev, bits->reg, &tmp8); |
| 5963 | tmp = tmp8; |
| 5964 | break; |
| 5965 | } |
| 5966 | case 2: { |
| 5967 | u16 tmp16 = 0; |
| 5968 | pci_read_config_word(pdev, bits->reg, &tmp16); |
| 5969 | tmp = tmp16; |
| 5970 | break; |
| 5971 | } |
| 5972 | case 4: { |
| 5973 | u32 tmp32 = 0; |
| 5974 | pci_read_config_dword(pdev, bits->reg, &tmp32); |
| 5975 | tmp = tmp32; |
| 5976 | break; |
| 5977 | } |
| 5978 | |
| 5979 | default: |
| 5980 | return -EINVAL; |
| 5981 | } |
| 5982 | |
| 5983 | tmp &= bits->mask; |
| 5984 | |
| 5985 | return (tmp == bits->val) ? 1 : 0; |
| 5986 | } |
| 5987 | |
| 5988 | void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) |
| 5989 | { |
| 5990 | pci_save_state(pdev); |
| 5991 | |
| 5992 | if (mesg.event == PM_EVENT_SUSPEND) { |
| 5993 | pci_disable_device(pdev); |
| 5994 | pci_set_power_state(pdev, PCI_D3hot); |
| 5995 | } |
| 5996 | } |
| 5997 | |
| 5998 | int ata_pci_device_do_resume(struct pci_dev *pdev) |
| 5999 | { |
| 6000 | int rc; |
| 6001 | |
| 6002 | pci_set_power_state(pdev, PCI_D0); |
| 6003 | pci_restore_state(pdev); |
| 6004 | |
| 6005 | rc = pcim_enable_device(pdev); |
| 6006 | if (rc) { |
| 6007 | dev_printk(KERN_ERR, &pdev->dev, |
| 6008 | "failed to enable device after resume (%d)\n", rc); |
| 6009 | return rc; |
| 6010 | } |
| 6011 | |
| 6012 | pci_set_master(pdev); |
| 6013 | return 0; |
| 6014 | } |
| 6015 | |
| 6016 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) |
| 6017 | { |
| 6018 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
| 6019 | int rc = 0; |
| 6020 | |
| 6021 | rc = ata_host_suspend(host, mesg); |
| 6022 | if (rc) |
| 6023 | return rc; |
| 6024 | |
| 6025 | ata_pci_device_do_suspend(pdev, mesg); |
| 6026 | |
| 6027 | return 0; |
| 6028 | } |
| 6029 | |
| 6030 | int ata_pci_device_resume(struct pci_dev *pdev) |
| 6031 | { |
| 6032 | struct ata_host *host = dev_get_drvdata(&pdev->dev); |
| 6033 | int rc; |
| 6034 | |
| 6035 | rc = ata_pci_device_do_resume(pdev); |
| 6036 | if (rc == 0) |
| 6037 | ata_host_resume(host); |
| 6038 | return rc; |
| 6039 | } |
| 6040 | #endif /* CONFIG_PCI */ |
| 6041 | |
| 6042 | |
static int __init ata_init(void)
{
	/* convert the probe timeout from seconds to jiffies once at init */
	ata_probe_timeout *= HZ;

	/* main workqueue for PIO/polling task work */
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	/* auxiliary single-threaded workqueue (e.g. hotplug tasks) */
	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
| 6059 | |
| 6060 | static void __exit ata_exit(void) |
| 6061 | { |
| 6062 | destroy_workqueue(ata_wq); |
| 6063 | destroy_workqueue(ata_aux_wq); |
| 6064 | } |
| 6065 | |
/* module entry/exit hooks for the libata core */
subsys_initcall(ata_init);
module_exit(ata_exit);
| 6068 | |
| 6069 | static unsigned long ratelimit_time; |
| 6070 | static DEFINE_SPINLOCK(ata_ratelimit_lock); |
| 6071 | |
| 6072 | int ata_ratelimit(void) |
| 6073 | { |
| 6074 | int rc; |
| 6075 | unsigned long flags; |
| 6076 | |
| 6077 | spin_lock_irqsave(&ata_ratelimit_lock, flags); |
| 6078 | |
| 6079 | if (time_after(jiffies, ratelimit_time)) { |
| 6080 | rc = 1; |
| 6081 | ratelimit_time = jiffies + (HZ/5); |
| 6082 | } else |
| 6083 | rc = 0; |
| 6084 | |
| 6085 | spin_unlock_irqrestore(&ata_ratelimit_lock, flags); |
| 6086 | |
| 6087 | return rc; |
| 6088 | } |
| 6089 | |
| 6090 | /** |
| 6091 | * ata_wait_register - wait until register value changes |
| 6092 | * @reg: IO-mapped register |
| 6093 | * @mask: Mask to apply to read register value |
| 6094 | * @val: Wait condition |
| 6095 | * @interval_msec: polling interval in milliseconds |
| 6096 | * @timeout_msec: timeout in milliseconds |
| 6097 | * |
| 6098 | * Waiting for some bits of register to change is a common |
| 6099 | * operation for ATA controllers. This function reads 32bit LE |
| 6100 | * IO-mapped register @reg and tests for the following condition. |
| 6101 | * |
| 6102 | * (*@reg & mask) != val |
| 6103 | * |
| 6104 | * If the condition is met, it returns; otherwise, the process is |
| 6105 | * repeated after @interval_msec until timeout. |
| 6106 | * |
| 6107 | * LOCKING: |
| 6108 | * Kernel thread context (may sleep) |
| 6109 | * |
| 6110 | * RETURNS: |
| 6111 | * The final register value. |
| 6112 | */ |
| 6113 | u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
| 6114 | unsigned long interval_msec, |
| 6115 | unsigned long timeout_msec) |
| 6116 | { |
| 6117 | unsigned long timeout; |
| 6118 | u32 tmp; |
| 6119 | |
| 6120 | tmp = ioread32(reg); |
| 6121 | |
| 6122 | /* Calculate timeout _after_ the first read to make sure |
| 6123 | * preceding writes reach the controller before starting to |
| 6124 | * eat away the timeout. |
| 6125 | */ |
| 6126 | timeout = jiffies + (timeout_msec * HZ) / 1000; |
| 6127 | |
| 6128 | while ((tmp & mask) == val && time_before(jiffies, timeout)) { |
| 6129 | msleep(interval_msec); |
| 6130 | tmp = ioread32(reg); |
| 6131 | } |
| 6132 | |
| 6133 | return tmp; |
| 6134 | } |
| 6135 | |
| 6136 | /* |
| 6137 | * Dummy port_ops |
| 6138 | */ |
| 6139 | static void ata_dummy_noret(struct ata_port *ap) { } |
| 6140 | static int ata_dummy_ret0(struct ata_port *ap) { return 0; } |
| 6141 | static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } |
| 6142 | |
| 6143 | static u8 ata_dummy_check_status(struct ata_port *ap) |
| 6144 | { |
| 6145 | return ATA_DRDY; |
| 6146 | } |
| 6147 | |
| 6148 | static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) |
| 6149 | { |
| 6150 | return AC_ERR_SYSTEM; |
| 6151 | } |
| 6152 | |
/* Operations table for dummy ports: status reads return ATA_DRDY,
 * qc_issue fails every command with AC_ERR_SYSTEM, and the remaining
 * hooks are no-ops.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
| 6168 | |
| 6169 | /* |
| 6170 | * libata is essentially a library of internal helper functions for |
| 6171 | * low-level ATA host controller drivers. As such, the API/ABI is |
| 6172 | * likely to change as new drivers are added and updated. |
| 6173 | * Do not depend on ABI/API stability. |
| 6174 | */ |
| 6175 | |
| 6176 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); |
| 6177 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); |
| 6178 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); |
| 6179 | EXPORT_SYMBOL_GPL(ata_dummy_port_ops); |
| 6180 | EXPORT_SYMBOL_GPL(ata_std_bios_param); |
| 6181 | EXPORT_SYMBOL_GPL(ata_std_ports); |
| 6182 | EXPORT_SYMBOL_GPL(ata_host_init); |
| 6183 | EXPORT_SYMBOL_GPL(ata_device_add); |
| 6184 | EXPORT_SYMBOL_GPL(ata_host_detach); |
| 6185 | EXPORT_SYMBOL_GPL(ata_sg_init); |
| 6186 | EXPORT_SYMBOL_GPL(ata_sg_init_one); |
| 6187 | EXPORT_SYMBOL_GPL(ata_hsm_move); |
| 6188 | EXPORT_SYMBOL_GPL(ata_qc_complete); |
| 6189 | EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); |
| 6190 | EXPORT_SYMBOL_GPL(ata_qc_issue_prot); |
| 6191 | EXPORT_SYMBOL_GPL(ata_tf_load); |
| 6192 | EXPORT_SYMBOL_GPL(ata_tf_read); |
| 6193 | EXPORT_SYMBOL_GPL(ata_noop_dev_select); |
| 6194 | EXPORT_SYMBOL_GPL(ata_std_dev_select); |
| 6195 | EXPORT_SYMBOL_GPL(ata_tf_to_fis); |
| 6196 | EXPORT_SYMBOL_GPL(ata_tf_from_fis); |
| 6197 | EXPORT_SYMBOL_GPL(ata_check_status); |
| 6198 | EXPORT_SYMBOL_GPL(ata_altstatus); |
| 6199 | EXPORT_SYMBOL_GPL(ata_exec_command); |
| 6200 | EXPORT_SYMBOL_GPL(ata_port_start); |
| 6201 | EXPORT_SYMBOL_GPL(ata_interrupt); |
| 6202 | EXPORT_SYMBOL_GPL(ata_data_xfer); |
| 6203 | EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); |
| 6204 | EXPORT_SYMBOL_GPL(ata_qc_prep); |
| 6205 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); |
| 6206 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); |
| 6207 | EXPORT_SYMBOL_GPL(ata_bmdma_start); |
| 6208 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); |
| 6209 | EXPORT_SYMBOL_GPL(ata_bmdma_status); |
| 6210 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); |
| 6211 | EXPORT_SYMBOL_GPL(ata_bmdma_freeze); |
| 6212 | EXPORT_SYMBOL_GPL(ata_bmdma_thaw); |
| 6213 | EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); |
| 6214 | EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); |
| 6215 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); |
| 6216 | EXPORT_SYMBOL_GPL(ata_port_probe); |
| 6217 | EXPORT_SYMBOL_GPL(sata_set_spd); |
| 6218 | EXPORT_SYMBOL_GPL(sata_phy_debounce); |
| 6219 | EXPORT_SYMBOL_GPL(sata_phy_resume); |
| 6220 | EXPORT_SYMBOL_GPL(sata_phy_reset); |
| 6221 | EXPORT_SYMBOL_GPL(__sata_phy_reset); |
| 6222 | EXPORT_SYMBOL_GPL(ata_bus_reset); |
| 6223 | EXPORT_SYMBOL_GPL(ata_std_prereset); |
| 6224 | EXPORT_SYMBOL_GPL(ata_std_softreset); |
| 6225 | EXPORT_SYMBOL_GPL(sata_port_hardreset); |
| 6226 | EXPORT_SYMBOL_GPL(sata_std_hardreset); |
| 6227 | EXPORT_SYMBOL_GPL(ata_std_postreset); |
| 6228 | EXPORT_SYMBOL_GPL(ata_dev_classify); |
| 6229 | EXPORT_SYMBOL_GPL(ata_dev_pair); |
| 6230 | EXPORT_SYMBOL_GPL(ata_port_disable); |
| 6231 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
| 6232 | EXPORT_SYMBOL_GPL(ata_wait_register); |
| 6233 | EXPORT_SYMBOL_GPL(ata_busy_sleep); |
| 6234 | EXPORT_SYMBOL_GPL(ata_port_queue_task); |
| 6235 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); |
| 6236 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
| 6237 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |
| 6238 | EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); |
| 6239 | EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); |
| 6240 | EXPORT_SYMBOL_GPL(ata_host_intr); |
| 6241 | EXPORT_SYMBOL_GPL(sata_scr_valid); |
| 6242 | EXPORT_SYMBOL_GPL(sata_scr_read); |
| 6243 | EXPORT_SYMBOL_GPL(sata_scr_write); |
| 6244 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); |
| 6245 | EXPORT_SYMBOL_GPL(ata_port_online); |
| 6246 | EXPORT_SYMBOL_GPL(ata_port_offline); |
| 6247 | EXPORT_SYMBOL_GPL(ata_host_suspend); |
| 6248 | EXPORT_SYMBOL_GPL(ata_host_resume); |
| 6249 | EXPORT_SYMBOL_GPL(ata_id_string); |
| 6250 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
| 6251 | EXPORT_SYMBOL_GPL(ata_device_blacklisted); |
| 6252 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
| 6253 | |
| 6254 | EXPORT_SYMBOL_GPL(ata_pio_need_iordy); |
| 6255 | EXPORT_SYMBOL_GPL(ata_timing_compute); |
| 6256 | EXPORT_SYMBOL_GPL(ata_timing_merge); |
| 6257 | |
| 6258 | #ifdef CONFIG_PCI |
| 6259 | EXPORT_SYMBOL_GPL(pci_test_config_bits); |
| 6260 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); |
| 6261 | EXPORT_SYMBOL_GPL(ata_pci_init_one); |
| 6262 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
| 6263 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); |
| 6264 | EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); |
| 6265 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); |
| 6266 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); |
| 6267 | EXPORT_SYMBOL_GPL(ata_pci_default_filter); |
| 6268 | EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); |
| 6269 | #endif /* CONFIG_PCI */ |
| 6270 | |
| 6271 | EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); |
| 6272 | EXPORT_SYMBOL_GPL(ata_scsi_device_resume); |
| 6273 | |
| 6274 | EXPORT_SYMBOL_GPL(ata_eng_timeout); |
| 6275 | EXPORT_SYMBOL_GPL(ata_port_schedule_eh); |
| 6276 | EXPORT_SYMBOL_GPL(ata_port_abort); |
| 6277 | EXPORT_SYMBOL_GPL(ata_port_freeze); |
| 6278 | EXPORT_SYMBOL_GPL(ata_eh_freeze_port); |
| 6279 | EXPORT_SYMBOL_GPL(ata_eh_thaw_port); |
| 6280 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); |
| 6281 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); |
| 6282 | EXPORT_SYMBOL_GPL(ata_do_eh); |
| 6283 | EXPORT_SYMBOL_GPL(ata_irq_on); |
| 6284 | EXPORT_SYMBOL_GPL(ata_dummy_irq_on); |
| 6285 | EXPORT_SYMBOL_GPL(ata_irq_ack); |
| 6286 | EXPORT_SYMBOL_GPL(ata_dummy_irq_ack); |