Commit | Line | Data |
---|---|---|
1fdffbce | 1 | /* |
f3a03b09 | 2 | * libata-sff.c - helper library for PCI IDE BMDMA |
1fdffbce JG |
3 | * |
4 | * Maintained by: Jeff Garzik <jgarzik@pobox.com> | |
5 | * Please ALWAYS copy linux-ide@vger.kernel.org | |
6 | * on emails. | |
7 | * | |
8 | * Copyright 2003-2006 Red Hat, Inc. All rights reserved. | |
9 | * Copyright 2003-2006 Jeff Garzik | |
10 | * | |
11 | * | |
12 | * This program is free software; you can redistribute it and/or modify | |
13 | * it under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2, or (at your option) | |
15 | * any later version. | |
16 | * | |
17 | * This program is distributed in the hope that it will be useful, | |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | * GNU General Public License for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License | |
23 | * along with this program; see the file COPYING. If not, write to | |
24 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | |
25 | * | |
26 | * | |
27 | * libata documentation is available via 'make {ps|pdf}docs', | |
28 | * as Documentation/DocBook/libata.* | |
29 | * | |
30 | * Hardware documentation available from http://www.t13.org/ and | |
31 | * http://www.sata-io.org/ | |
32 | * | |
33 | */ | |
34 | ||
1fdffbce | 35 | #include <linux/kernel.h> |
5a0e3ad6 | 36 | #include <linux/gfp.h> |
1fdffbce JG |
37 | #include <linux/pci.h> |
38 | #include <linux/libata.h> | |
624d5c51 | 39 | #include <linux/highmem.h> |
1fdffbce JG |
40 | |
41 | #include "libata.h" | |
42 | ||
c429137a TH |
43 | static struct workqueue_struct *ata_sff_wq; |
44 | ||
624d5c51 TH |
45 | const struct ata_port_operations ata_sff_port_ops = { |
46 | .inherits = &ata_base_port_ops, | |
47 | ||
f47451c4 | 48 | .qc_prep = ata_noop_qc_prep, |
9363c382 | 49 | .qc_issue = ata_sff_qc_issue, |
4c9bf4e7 | 50 | .qc_fill_rtf = ata_sff_qc_fill_rtf, |
9363c382 TH |
51 | |
52 | .freeze = ata_sff_freeze, | |
53 | .thaw = ata_sff_thaw, | |
0aa1113d | 54 | .prereset = ata_sff_prereset, |
9363c382 | 55 | .softreset = ata_sff_softreset, |
57c9efdf | 56 | .hardreset = sata_sff_hardreset, |
203c75b8 | 57 | .postreset = ata_sff_postreset, |
9363c382 | 58 | .error_handler = ata_sff_error_handler, |
9363c382 | 59 | |
5682ed33 TH |
60 | .sff_dev_select = ata_sff_dev_select, |
61 | .sff_check_status = ata_sff_check_status, | |
62 | .sff_tf_load = ata_sff_tf_load, | |
63 | .sff_tf_read = ata_sff_tf_read, | |
64 | .sff_exec_command = ata_sff_exec_command, | |
65 | .sff_data_xfer = ata_sff_data_xfer, | |
8244cd05 | 66 | .sff_drain_fifo = ata_sff_drain_fifo, |
624d5c51 | 67 | |
c96f1732 | 68 | .lost_interrupt = ata_sff_lost_interrupt, |
624d5c51 | 69 | }; |
0fe40ff8 | 70 | EXPORT_SYMBOL_GPL(ata_sff_port_ops); |
624d5c51 | 71 | |
272f7884 | 72 | /** |
9363c382 | 73 | * ata_sff_check_status - Read device status reg & clear interrupt |
272f7884 TH |
74 | * @ap: port where the device is |
75 | * | |
76 | * Reads ATA taskfile status register for currently-selected device | |
77 | * and return its value. This also clears pending interrupts | |
78 | * from this device | |
79 | * | |
80 | * LOCKING: | |
81 | * Inherited from caller. | |
82 | */ | |
9363c382 | 83 | u8 ata_sff_check_status(struct ata_port *ap) |
272f7884 TH |
84 | { |
85 | return ioread8(ap->ioaddr.status_addr); | |
86 | } | |
0fe40ff8 | 87 | EXPORT_SYMBOL_GPL(ata_sff_check_status); |
272f7884 TH |
88 | |
89 | /** | |
9363c382 | 90 | * ata_sff_altstatus - Read device alternate status reg |
272f7884 TH |
91 | * @ap: port where the device is |
92 | * | |
93 | * Reads ATA taskfile alternate status register for | |
94 | * currently-selected device and return its value. | |
95 | * | |
96 | * Note: may NOT be used as the check_altstatus() entry in | |
97 | * ata_port_operations. | |
98 | * | |
99 | * LOCKING: | |
100 | * Inherited from caller. | |
101 | */ | |
a57c1bad | 102 | static u8 ata_sff_altstatus(struct ata_port *ap) |
624d5c51 | 103 | { |
5682ed33 TH |
104 | if (ap->ops->sff_check_altstatus) |
105 | return ap->ops->sff_check_altstatus(ap); | |
624d5c51 TH |
106 | |
107 | return ioread8(ap->ioaddr.altstatus_addr); | |
108 | } | |
109 | ||
a57c1bad AC |
110 | /** |
111 | * ata_sff_irq_status - Check if the device is busy | |
112 | * @ap: port where the device is | |
113 | * | |
114 | * Determine if the port is currently busy. Uses altstatus | |
115 | * if available in order to avoid clearing shared IRQ status | |
116 | * when finding an IRQ source. Non ctl capable devices don't | |
117 | * share interrupt lines fortunately for us. | |
118 | * | |
119 | * LOCKING: | |
120 | * Inherited from caller. | |
121 | */ | |
122 | static u8 ata_sff_irq_status(struct ata_port *ap) | |
123 | { | |
124 | u8 status; | |
125 | ||
126 | if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { | |
127 | status = ata_sff_altstatus(ap); | |
128 | /* Not us: We are busy */ | |
129 | if (status & ATA_BUSY) | |
0fe40ff8 | 130 | return status; |
a57c1bad AC |
131 | } |
132 | /* Clear INTRQ latch */ | |
6311c90a | 133 | status = ap->ops->sff_check_status(ap); |
a57c1bad AC |
134 | return status; |
135 | } | |
136 | ||
137 | /** | |
138 | * ata_sff_sync - Flush writes | |
139 | * @ap: Port to wait for. | |
140 | * | |
141 | * CAUTION: | |
142 | * If we have an mmio device with no ctl and no altstatus | |
143 | * method this will fail. No such devices are known to exist. | |
144 | * | |
145 | * LOCKING: | |
146 | * Inherited from caller. | |
147 | */ | |
148 | ||
149 | static void ata_sff_sync(struct ata_port *ap) | |
150 | { | |
151 | if (ap->ops->sff_check_altstatus) | |
152 | ap->ops->sff_check_altstatus(ap); | |
153 | else if (ap->ioaddr.altstatus_addr) | |
154 | ioread8(ap->ioaddr.altstatus_addr); | |
155 | } | |
156 | ||
157 | /** | |
158 | * ata_sff_pause - Flush writes and wait 400nS | |
159 | * @ap: Port to pause for. | |
160 | * | |
161 | * CAUTION: | |
162 | * If we have an mmio device with no ctl and no altstatus | |
163 | * method this will fail. No such devices are known to exist. | |
164 | * | |
165 | * LOCKING: | |
166 | * Inherited from caller. | |
167 | */ | |
168 | ||
169 | void ata_sff_pause(struct ata_port *ap) | |
170 | { | |
171 | ata_sff_sync(ap); | |
172 | ndelay(400); | |
173 | } | |
0fe40ff8 | 174 | EXPORT_SYMBOL_GPL(ata_sff_pause); |
a57c1bad AC |
175 | |
176 | /** | |
177 | * ata_sff_dma_pause - Pause before commencing DMA | |
178 | * @ap: Port to pause for. | |
179 | * | |
180 | * Perform I/O fencing and ensure sufficient cycle delays occur | |
181 | * for the HDMA1:0 transition | |
182 | */ | |
0fe40ff8 | 183 | |
a57c1bad AC |
184 | void ata_sff_dma_pause(struct ata_port *ap) |
185 | { | |
186 | if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { | |
187 | /* An altstatus read will cause the needed delay without | |
188 | messing up the IRQ status */ | |
189 | ata_sff_altstatus(ap); | |
190 | return; | |
191 | } | |
192 | /* There are no DMA controllers without ctl. BUG here to ensure | |
193 | we never violate the HDMA1:0 transition timing and risk | |
194 | corruption. */ | |
195 | BUG(); | |
196 | } | |
0fe40ff8 | 197 | EXPORT_SYMBOL_GPL(ata_sff_dma_pause); |
a57c1bad | 198 | |
624d5c51 | 199 | /** |
9363c382 | 200 | * ata_sff_busy_sleep - sleep until BSY clears, or timeout |
624d5c51 | 201 | * @ap: port containing status register to be polled |
341c2c95 TH |
202 | * @tmout_pat: impatience timeout in msecs |
203 | * @tmout: overall timeout in msecs | |
624d5c51 TH |
204 | * |
205 | * Sleep until ATA Status register bit BSY clears, | |
206 | * or a timeout occurs. | |
207 | * | |
208 | * LOCKING: | |
209 | * Kernel thread context (may sleep). | |
210 | * | |
211 | * RETURNS: | |
212 | * 0 on success, -errno otherwise. | |
213 | */ | |
9363c382 TH |
214 | int ata_sff_busy_sleep(struct ata_port *ap, |
215 | unsigned long tmout_pat, unsigned long tmout) | |
624d5c51 TH |
216 | { |
217 | unsigned long timer_start, timeout; | |
218 | u8 status; | |
219 | ||
9363c382 | 220 | status = ata_sff_busy_wait(ap, ATA_BUSY, 300); |
624d5c51 | 221 | timer_start = jiffies; |
341c2c95 | 222 | timeout = ata_deadline(timer_start, tmout_pat); |
624d5c51 TH |
223 | while (status != 0xff && (status & ATA_BUSY) && |
224 | time_before(jiffies, timeout)) { | |
97750ceb | 225 | ata_msleep(ap, 50); |
9363c382 | 226 | status = ata_sff_busy_wait(ap, ATA_BUSY, 3); |
624d5c51 TH |
227 | } |
228 | ||
229 | if (status != 0xff && (status & ATA_BUSY)) | |
230 | ata_port_printk(ap, KERN_WARNING, | |
231 | "port is slow to respond, please be patient " | |
232 | "(Status 0x%x)\n", status); | |
233 | ||
341c2c95 | 234 | timeout = ata_deadline(timer_start, tmout); |
624d5c51 TH |
235 | while (status != 0xff && (status & ATA_BUSY) && |
236 | time_before(jiffies, timeout)) { | |
97750ceb | 237 | ata_msleep(ap, 50); |
5682ed33 | 238 | status = ap->ops->sff_check_status(ap); |
624d5c51 TH |
239 | } |
240 | ||
241 | if (status == 0xff) | |
242 | return -ENODEV; | |
243 | ||
244 | if (status & ATA_BUSY) { | |
245 | ata_port_printk(ap, KERN_ERR, "port failed to respond " | |
246 | "(%lu secs, Status 0x%x)\n", | |
341c2c95 | 247 | DIV_ROUND_UP(tmout, 1000), status); |
624d5c51 TH |
248 | return -EBUSY; |
249 | } | |
250 | ||
251 | return 0; | |
252 | } | |
0fe40ff8 | 253 | EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); |
624d5c51 | 254 | |
aa2731ad TH |
255 | static int ata_sff_check_ready(struct ata_link *link) |
256 | { | |
257 | u8 status = link->ap->ops->sff_check_status(link->ap); | |
258 | ||
78ab88f0 | 259 | return ata_check_ready(status); |
aa2731ad TH |
260 | } |
261 | ||
624d5c51 | 262 | /** |
9363c382 | 263 | * ata_sff_wait_ready - sleep until BSY clears, or timeout |
705e76be | 264 | * @link: SFF link to wait ready status for |
624d5c51 TH |
265 | * @deadline: deadline jiffies for the operation |
266 | * | |
267 | * Sleep until ATA Status register bit BSY clears, or timeout | |
268 | * occurs. | |
269 | * | |
270 | * LOCKING: | |
271 | * Kernel thread context (may sleep). | |
272 | * | |
273 | * RETURNS: | |
274 | * 0 on success, -errno otherwise. | |
275 | */ | |
705e76be | 276 | int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) |
624d5c51 | 277 | { |
aa2731ad | 278 | return ata_wait_ready(link, deadline, ata_sff_check_ready); |
624d5c51 | 279 | } |
0fe40ff8 | 280 | EXPORT_SYMBOL_GPL(ata_sff_wait_ready); |
624d5c51 | 281 | |
41dec29b SS |
282 | /** |
283 | * ata_sff_set_devctl - Write device control reg | |
284 | * @ap: port where the device is | |
285 | * @ctl: value to write | |
286 | * | |
287 | * Writes ATA taskfile device control register. | |
288 | * | |
289 | * Note: may NOT be used as the sff_set_devctl() entry in | |
290 | * ata_port_operations. | |
291 | * | |
292 | * LOCKING: | |
293 | * Inherited from caller. | |
294 | */ | |
295 | static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) | |
296 | { | |
297 | if (ap->ops->sff_set_devctl) | |
298 | ap->ops->sff_set_devctl(ap, ctl); | |
299 | else | |
300 | iowrite8(ctl, ap->ioaddr.ctl_addr); | |
301 | } | |
302 | ||
624d5c51 | 303 | /** |
9363c382 | 304 | * ata_sff_dev_select - Select device 0/1 on ATA bus |
624d5c51 TH |
305 | * @ap: ATA channel to manipulate |
306 | * @device: ATA device (numbered from zero) to select | |
307 | * | |
308 | * Use the method defined in the ATA specification to | |
309 | * make either device 0, or device 1, active on the | |
310 | * ATA channel. Works with both PIO and MMIO. | |
311 | * | |
312 | * May be used as the dev_select() entry in ata_port_operations. | |
313 | * | |
314 | * LOCKING: | |
315 | * caller. | |
316 | */ | |
9363c382 | 317 | void ata_sff_dev_select(struct ata_port *ap, unsigned int device) |
624d5c51 TH |
318 | { |
319 | u8 tmp; | |
320 | ||
321 | if (device == 0) | |
322 | tmp = ATA_DEVICE_OBS; | |
323 | else | |
324 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | |
325 | ||
326 | iowrite8(tmp, ap->ioaddr.device_addr); | |
9363c382 | 327 | ata_sff_pause(ap); /* needed; also flushes, for mmio */ |
624d5c51 | 328 | } |
0fe40ff8 | 329 | EXPORT_SYMBOL_GPL(ata_sff_dev_select); |
624d5c51 TH |
330 | |
331 | /** | |
332 | * ata_dev_select - Select device 0/1 on ATA bus | |
333 | * @ap: ATA channel to manipulate | |
334 | * @device: ATA device (numbered from zero) to select | |
335 | * @wait: non-zero to wait for Status register BSY bit to clear | |
336 | * @can_sleep: non-zero if context allows sleeping | |
337 | * | |
338 | * Use the method defined in the ATA specification to | |
339 | * make either device 0, or device 1, active on the | |
340 | * ATA channel. | |
341 | * | |
9363c382 TH |
342 | * This is a high-level version of ata_sff_dev_select(), which |
343 | * additionally provides the services of inserting the proper | |
344 | * pauses and status polling, where needed. | |
624d5c51 TH |
345 | * |
346 | * LOCKING: | |
347 | * caller. | |
348 | */ | |
c7a8209f | 349 | static void ata_dev_select(struct ata_port *ap, unsigned int device, |
624d5c51 TH |
350 | unsigned int wait, unsigned int can_sleep) |
351 | { | |
352 | if (ata_msg_probe(ap)) | |
353 | ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " | |
354 | "device %u, wait %u\n", device, wait); | |
355 | ||
356 | if (wait) | |
357 | ata_wait_idle(ap); | |
358 | ||
5682ed33 | 359 | ap->ops->sff_dev_select(ap, device); |
624d5c51 TH |
360 | |
361 | if (wait) { | |
362 | if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) | |
97750ceb | 363 | ata_msleep(ap, 150); |
624d5c51 TH |
364 | ata_wait_idle(ap); |
365 | } | |
366 | } | |
367 | ||
368 | /** | |
9363c382 | 369 | * ata_sff_irq_on - Enable interrupts on a port. |
624d5c51 TH |
370 | * @ap: Port on which interrupts are enabled. |
371 | * | |
372 | * Enable interrupts on a legacy IDE device using MMIO or PIO, | |
373 | * wait for idle, clear any pending interrupts. | |
374 | * | |
e42a542b SS |
375 | * Note: may NOT be used as the sff_irq_on() entry in |
376 | * ata_port_operations. | |
377 | * | |
624d5c51 TH |
378 | * LOCKING: |
379 | * Inherited from caller. | |
380 | */ | |
e42a542b | 381 | void ata_sff_irq_on(struct ata_port *ap) |
624d5c51 TH |
382 | { |
383 | struct ata_ioports *ioaddr = &ap->ioaddr; | |
e42a542b SS |
384 | |
385 | if (ap->ops->sff_irq_on) { | |
386 | ap->ops->sff_irq_on(ap); | |
387 | return; | |
388 | } | |
624d5c51 TH |
389 | |
390 | ap->ctl &= ~ATA_NIEN; | |
391 | ap->last_ctl = ap->ctl; | |
392 | ||
e42a542b SS |
393 | if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) |
394 | ata_sff_set_devctl(ap, ap->ctl); | |
395 | ata_wait_idle(ap); | |
624d5c51 | 396 | |
37f65b8b TH |
397 | if (ap->ops->sff_irq_clear) |
398 | ap->ops->sff_irq_clear(ap); | |
624d5c51 | 399 | } |
0fe40ff8 | 400 | EXPORT_SYMBOL_GPL(ata_sff_irq_on); |
624d5c51 | 401 | |
624d5c51 | 402 | /** |
9363c382 | 403 | * ata_sff_tf_load - send taskfile registers to host controller |
624d5c51 TH |
404 | * @ap: Port to which output is sent |
405 | * @tf: ATA taskfile register set | |
406 | * | |
407 | * Outputs ATA taskfile to standard ATA host controller. | |
408 | * | |
409 | * LOCKING: | |
410 | * Inherited from caller. | |
411 | */ | |
9363c382 | 412 | void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) |
624d5c51 TH |
413 | { |
414 | struct ata_ioports *ioaddr = &ap->ioaddr; | |
415 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | |
416 | ||
417 | if (tf->ctl != ap->last_ctl) { | |
418 | if (ioaddr->ctl_addr) | |
419 | iowrite8(tf->ctl, ioaddr->ctl_addr); | |
420 | ap->last_ctl = tf->ctl; | |
40c60230 | 421 | ata_wait_idle(ap); |
624d5c51 TH |
422 | } |
423 | ||
424 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | |
efcb3cf7 | 425 | WARN_ON_ONCE(!ioaddr->ctl_addr); |
624d5c51 TH |
426 | iowrite8(tf->hob_feature, ioaddr->feature_addr); |
427 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); | |
428 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); | |
429 | iowrite8(tf->hob_lbam, ioaddr->lbam_addr); | |
430 | iowrite8(tf->hob_lbah, ioaddr->lbah_addr); | |
431 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", | |
432 | tf->hob_feature, | |
433 | tf->hob_nsect, | |
434 | tf->hob_lbal, | |
435 | tf->hob_lbam, | |
436 | tf->hob_lbah); | |
437 | } | |
438 | ||
439 | if (is_addr) { | |
440 | iowrite8(tf->feature, ioaddr->feature_addr); | |
441 | iowrite8(tf->nsect, ioaddr->nsect_addr); | |
442 | iowrite8(tf->lbal, ioaddr->lbal_addr); | |
443 | iowrite8(tf->lbam, ioaddr->lbam_addr); | |
444 | iowrite8(tf->lbah, ioaddr->lbah_addr); | |
445 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | |
446 | tf->feature, | |
447 | tf->nsect, | |
448 | tf->lbal, | |
449 | tf->lbam, | |
450 | tf->lbah); | |
451 | } | |
452 | ||
453 | if (tf->flags & ATA_TFLAG_DEVICE) { | |
454 | iowrite8(tf->device, ioaddr->device_addr); | |
455 | VPRINTK("device 0x%X\n", tf->device); | |
456 | } | |
40c60230 TH |
457 | |
458 | ata_wait_idle(ap); | |
624d5c51 | 459 | } |
0fe40ff8 | 460 | EXPORT_SYMBOL_GPL(ata_sff_tf_load); |
624d5c51 TH |
461 | |
462 | /** | |
9363c382 | 463 | * ata_sff_tf_read - input device's ATA taskfile shadow registers |
624d5c51 TH |
464 | * @ap: Port from which input is read |
465 | * @tf: ATA taskfile register set for storing input | |
466 | * | |
467 | * Reads ATA taskfile registers for currently-selected device | |
468 | * into @tf. Assumes the device has a fully SFF compliant task file | |
469 | * layout and behaviour. If you device does not (eg has a different | |
470 | * status method) then you will need to provide a replacement tf_read | |
471 | * | |
472 | * LOCKING: | |
473 | * Inherited from caller. | |
474 | */ | |
9363c382 | 475 | void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) |
624d5c51 TH |
476 | { |
477 | struct ata_ioports *ioaddr = &ap->ioaddr; | |
478 | ||
9363c382 | 479 | tf->command = ata_sff_check_status(ap); |
624d5c51 TH |
480 | tf->feature = ioread8(ioaddr->error_addr); |
481 | tf->nsect = ioread8(ioaddr->nsect_addr); | |
482 | tf->lbal = ioread8(ioaddr->lbal_addr); | |
483 | tf->lbam = ioread8(ioaddr->lbam_addr); | |
484 | tf->lbah = ioread8(ioaddr->lbah_addr); | |
485 | tf->device = ioread8(ioaddr->device_addr); | |
486 | ||
487 | if (tf->flags & ATA_TFLAG_LBA48) { | |
488 | if (likely(ioaddr->ctl_addr)) { | |
489 | iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); | |
490 | tf->hob_feature = ioread8(ioaddr->error_addr); | |
491 | tf->hob_nsect = ioread8(ioaddr->nsect_addr); | |
492 | tf->hob_lbal = ioread8(ioaddr->lbal_addr); | |
493 | tf->hob_lbam = ioread8(ioaddr->lbam_addr); | |
494 | tf->hob_lbah = ioread8(ioaddr->lbah_addr); | |
495 | iowrite8(tf->ctl, ioaddr->ctl_addr); | |
496 | ap->last_ctl = tf->ctl; | |
497 | } else | |
efcb3cf7 | 498 | WARN_ON_ONCE(1); |
624d5c51 TH |
499 | } |
500 | } | |
0fe40ff8 | 501 | EXPORT_SYMBOL_GPL(ata_sff_tf_read); |
624d5c51 TH |
502 | |
503 | /** | |
9363c382 | 504 | * ata_sff_exec_command - issue ATA command to host controller |
624d5c51 TH |
505 | * @ap: port to which command is being issued |
506 | * @tf: ATA taskfile register set | |
507 | * | |
508 | * Issues ATA command, with proper synchronization with interrupt | |
509 | * handler / other threads. | |
510 | * | |
511 | * LOCKING: | |
512 | * spin_lock_irqsave(host lock) | |
513 | */ | |
9363c382 | 514 | void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) |
624d5c51 TH |
515 | { |
516 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); | |
517 | ||
518 | iowrite8(tf->command, ap->ioaddr.command_addr); | |
9363c382 | 519 | ata_sff_pause(ap); |
624d5c51 | 520 | } |
0fe40ff8 | 521 | EXPORT_SYMBOL_GPL(ata_sff_exec_command); |
624d5c51 TH |
522 | |
523 | /** | |
524 | * ata_tf_to_host - issue ATA taskfile to host controller | |
525 | * @ap: port to which command is being issued | |
526 | * @tf: ATA taskfile register set | |
527 | * | |
528 | * Issues ATA taskfile register set to ATA host controller, | |
529 | * with proper synchronization with interrupt handler and | |
530 | * other threads. | |
531 | * | |
532 | * LOCKING: | |
533 | * spin_lock_irqsave(host lock) | |
534 | */ | |
535 | static inline void ata_tf_to_host(struct ata_port *ap, | |
536 | const struct ata_taskfile *tf) | |
537 | { | |
5682ed33 TH |
538 | ap->ops->sff_tf_load(ap, tf); |
539 | ap->ops->sff_exec_command(ap, tf); | |
624d5c51 TH |
540 | } |
541 | ||
542 | /** | |
9363c382 | 543 | * ata_sff_data_xfer - Transfer data by PIO |
624d5c51 TH |
544 | * @dev: device to target |
545 | * @buf: data buffer | |
546 | * @buflen: buffer length | |
547 | * @rw: read/write | |
548 | * | |
549 | * Transfer data from/to the device data register by PIO. | |
550 | * | |
551 | * LOCKING: | |
552 | * Inherited from caller. | |
553 | * | |
554 | * RETURNS: | |
555 | * Bytes consumed. | |
556 | */ | |
9363c382 TH |
557 | unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, |
558 | unsigned int buflen, int rw) | |
624d5c51 TH |
559 | { |
560 | struct ata_port *ap = dev->link->ap; | |
561 | void __iomem *data_addr = ap->ioaddr.data_addr; | |
562 | unsigned int words = buflen >> 1; | |
563 | ||
564 | /* Transfer multiple of 2 bytes */ | |
565 | if (rw == READ) | |
566 | ioread16_rep(data_addr, buf, words); | |
567 | else | |
568 | iowrite16_rep(data_addr, buf, words); | |
569 | ||
2102d749 | 570 | /* Transfer trailing byte, if any. */ |
624d5c51 | 571 | if (unlikely(buflen & 0x01)) { |
2102d749 | 572 | unsigned char pad[2]; |
624d5c51 | 573 | |
2102d749 SS |
574 | /* Point buf to the tail of buffer */ |
575 | buf += buflen - 1; | |
576 | ||
577 | /* | |
578 | * Use io*16_rep() accessors here as well to avoid pointlessly | |
972b94ff | 579 | * swapping bytes to and from on the big endian machines... |
2102d749 | 580 | */ |
624d5c51 | 581 | if (rw == READ) { |
2102d749 SS |
582 | ioread16_rep(data_addr, pad, 1); |
583 | *buf = pad[0]; | |
624d5c51 | 584 | } else { |
2102d749 SS |
585 | pad[0] = *buf; |
586 | iowrite16_rep(data_addr, pad, 1); | |
624d5c51 TH |
587 | } |
588 | words++; | |
589 | } | |
590 | ||
591 | return words << 1; | |
592 | } | |
0fe40ff8 | 593 | EXPORT_SYMBOL_GPL(ata_sff_data_xfer); |
624d5c51 | 594 | |
871af121 AC |
595 | /** |
596 | * ata_sff_data_xfer32 - Transfer data by PIO | |
597 | * @dev: device to target | |
598 | * @buf: data buffer | |
599 | * @buflen: buffer length | |
600 | * @rw: read/write | |
601 | * | |
602 | * Transfer data from/to the device data register by PIO using 32bit | |
603 | * I/O operations. | |
604 | * | |
605 | * LOCKING: | |
606 | * Inherited from caller. | |
607 | * | |
608 | * RETURNS: | |
609 | * Bytes consumed. | |
610 | */ | |
611 | ||
612 | unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, | |
613 | unsigned int buflen, int rw) | |
614 | { | |
615 | struct ata_port *ap = dev->link->ap; | |
616 | void __iomem *data_addr = ap->ioaddr.data_addr; | |
617 | unsigned int words = buflen >> 2; | |
618 | int slop = buflen & 3; | |
972b94ff | 619 | |
e3cf95dd AC |
620 | if (!(ap->pflags & ATA_PFLAG_PIO32)) |
621 | return ata_sff_data_xfer(dev, buf, buflen, rw); | |
871af121 AC |
622 | |
623 | /* Transfer multiple of 4 bytes */ | |
624 | if (rw == READ) | |
625 | ioread32_rep(data_addr, buf, words); | |
626 | else | |
627 | iowrite32_rep(data_addr, buf, words); | |
628 | ||
d1b3525b | 629 | /* Transfer trailing bytes, if any */ |
871af121 | 630 | if (unlikely(slop)) { |
d1b3525b SS |
631 | unsigned char pad[4]; |
632 | ||
633 | /* Point buf to the tail of buffer */ | |
634 | buf += buflen - slop; | |
635 | ||
636 | /* | |
637 | * Use io*_rep() accessors here as well to avoid pointlessly | |
972b94ff | 638 | * swapping bytes to and from on the big endian machines... |
d1b3525b | 639 | */ |
871af121 | 640 | if (rw == READ) { |
d1b3525b SS |
641 | if (slop < 3) |
642 | ioread16_rep(data_addr, pad, 1); | |
643 | else | |
644 | ioread32_rep(data_addr, pad, 1); | |
645 | memcpy(buf, pad, slop); | |
871af121 | 646 | } else { |
d1b3525b SS |
647 | memcpy(pad, buf, slop); |
648 | if (slop < 3) | |
649 | iowrite16_rep(data_addr, pad, 1); | |
650 | else | |
651 | iowrite32_rep(data_addr, pad, 1); | |
871af121 | 652 | } |
871af121 | 653 | } |
d1b3525b | 654 | return (buflen + 1) & ~1; |
871af121 AC |
655 | } |
656 | EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); | |
657 | ||
624d5c51 | 658 | /** |
9363c382 | 659 | * ata_sff_data_xfer_noirq - Transfer data by PIO |
624d5c51 TH |
660 | * @dev: device to target |
661 | * @buf: data buffer | |
662 | * @buflen: buffer length | |
663 | * @rw: read/write | |
664 | * | |
665 | * Transfer data from/to the device data register by PIO. Do the | |
666 | * transfer with interrupts disabled. | |
667 | * | |
668 | * LOCKING: | |
669 | * Inherited from caller. | |
670 | * | |
671 | * RETURNS: | |
672 | * Bytes consumed. | |
673 | */ | |
9363c382 TH |
674 | unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, |
675 | unsigned int buflen, int rw) | |
624d5c51 TH |
676 | { |
677 | unsigned long flags; | |
678 | unsigned int consumed; | |
679 | ||
680 | local_irq_save(flags); | |
9363c382 | 681 | consumed = ata_sff_data_xfer(dev, buf, buflen, rw); |
624d5c51 TH |
682 | local_irq_restore(flags); |
683 | ||
684 | return consumed; | |
685 | } | |
0fe40ff8 | 686 | EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); |
624d5c51 TH |
687 | |
688 | /** | |
689 | * ata_pio_sector - Transfer a sector of data. | |
690 | * @qc: Command on going | |
691 | * | |
692 | * Transfer qc->sect_size bytes of data from/to the ATA device. | |
693 | * | |
694 | * LOCKING: | |
695 | * Inherited from caller. | |
696 | */ | |
697 | static void ata_pio_sector(struct ata_queued_cmd *qc) | |
698 | { | |
699 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | |
700 | struct ata_port *ap = qc->ap; | |
701 | struct page *page; | |
702 | unsigned int offset; | |
703 | unsigned char *buf; | |
704 | ||
705 | if (qc->curbytes == qc->nbytes - qc->sect_size) | |
706 | ap->hsm_task_state = HSM_ST_LAST; | |
707 | ||
708 | page = sg_page(qc->cursg); | |
709 | offset = qc->cursg->offset + qc->cursg_ofs; | |
710 | ||
711 | /* get the current page and offset */ | |
712 | page = nth_page(page, (offset >> PAGE_SHIFT)); | |
713 | offset %= PAGE_SIZE; | |
714 | ||
715 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | |
716 | ||
717 | if (PageHighMem(page)) { | |
718 | unsigned long flags; | |
719 | ||
720 | /* FIXME: use a bounce buffer */ | |
721 | local_irq_save(flags); | |
722 | buf = kmap_atomic(page, KM_IRQ0); | |
723 | ||
724 | /* do the actual data transfer */ | |
5682ed33 TH |
725 | ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, |
726 | do_write); | |
624d5c51 TH |
727 | |
728 | kunmap_atomic(buf, KM_IRQ0); | |
729 | local_irq_restore(flags); | |
730 | } else { | |
731 | buf = page_address(page); | |
5682ed33 TH |
732 | ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, |
733 | do_write); | |
624d5c51 TH |
734 | } |
735 | ||
3842e835 | 736 | if (!do_write && !PageSlab(page)) |
2d68b7fe CM |
737 | flush_dcache_page(page); |
738 | ||
624d5c51 TH |
739 | qc->curbytes += qc->sect_size; |
740 | qc->cursg_ofs += qc->sect_size; | |
741 | ||
742 | if (qc->cursg_ofs == qc->cursg->length) { | |
743 | qc->cursg = sg_next(qc->cursg); | |
744 | qc->cursg_ofs = 0; | |
745 | } | |
746 | } | |
747 | ||
748 | /** | |
749 | * ata_pio_sectors - Transfer one or many sectors. | |
750 | * @qc: Command on going | |
751 | * | |
752 | * Transfer one or many sectors of data from/to the | |
753 | * ATA device for the DRQ request. | |
754 | * | |
755 | * LOCKING: | |
756 | * Inherited from caller. | |
757 | */ | |
758 | static void ata_pio_sectors(struct ata_queued_cmd *qc) | |
759 | { | |
760 | if (is_multi_taskfile(&qc->tf)) { | |
761 | /* READ/WRITE MULTIPLE */ | |
762 | unsigned int nsect; | |
763 | ||
efcb3cf7 | 764 | WARN_ON_ONCE(qc->dev->multi_count == 0); |
624d5c51 TH |
765 | |
766 | nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, | |
767 | qc->dev->multi_count); | |
768 | while (nsect--) | |
769 | ata_pio_sector(qc); | |
770 | } else | |
771 | ata_pio_sector(qc); | |
772 | ||
a57c1bad | 773 | ata_sff_sync(qc->ap); /* flush */ |
624d5c51 TH |
774 | } |
775 | ||
776 | /** | |
777 | * atapi_send_cdb - Write CDB bytes to hardware | |
778 | * @ap: Port to which ATAPI device is attached. | |
779 | * @qc: Taskfile currently active | |
780 | * | |
781 | * When device has indicated its readiness to accept | |
782 | * a CDB, this function is called. Send the CDB. | |
783 | * | |
784 | * LOCKING: | |
785 | * caller. | |
786 | */ | |
787 | static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) | |
788 | { | |
789 | /* send SCSI cdb */ | |
790 | DPRINTK("send cdb\n"); | |
efcb3cf7 | 791 | WARN_ON_ONCE(qc->dev->cdb_len < 12); |
624d5c51 | 792 | |
5682ed33 | 793 | ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); |
a57c1bad AC |
794 | ata_sff_sync(ap); |
795 | /* FIXME: If the CDB is for DMA do we need to do the transition delay | |
796 | or is bmdma_start guaranteed to do it ? */ | |
624d5c51 TH |
797 | switch (qc->tf.protocol) { |
798 | case ATAPI_PROT_PIO: | |
799 | ap->hsm_task_state = HSM_ST; | |
800 | break; | |
801 | case ATAPI_PROT_NODATA: | |
802 | ap->hsm_task_state = HSM_ST_LAST; | |
803 | break; | |
9a7780c9 | 804 | #ifdef CONFIG_ATA_BMDMA |
624d5c51 TH |
805 | case ATAPI_PROT_DMA: |
806 | ap->hsm_task_state = HSM_ST_LAST; | |
807 | /* initiate bmdma */ | |
808 | ap->ops->bmdma_start(qc); | |
809 | break; | |
9a7780c9 TH |
810 | #endif /* CONFIG_ATA_BMDMA */ |
811 | default: | |
812 | BUG(); | |
624d5c51 TH |
813 | } |
814 | } | |
815 | ||
816 | /** | |
817 | * __atapi_pio_bytes - Transfer data from/to the ATAPI device. | |
818 | * @qc: Command on going | |
819 | * @bytes: number of bytes | |
820 | * | |
821 | * Transfer Transfer data from/to the ATAPI device. | |
822 | * | |
823 | * LOCKING: | |
824 | * Inherited from caller. | |
825 | * | |
826 | */ | |
827 | static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | |
828 | { | |
829 | int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; | |
830 | struct ata_port *ap = qc->ap; | |
831 | struct ata_device *dev = qc->dev; | |
832 | struct ata_eh_info *ehi = &dev->link->eh_info; | |
833 | struct scatterlist *sg; | |
834 | struct page *page; | |
835 | unsigned char *buf; | |
836 | unsigned int offset, count, consumed; | |
837 | ||
838 | next_sg: | |
839 | sg = qc->cursg; | |
840 | if (unlikely(!sg)) { | |
841 | ata_ehi_push_desc(ehi, "unexpected or too much trailing data " | |
842 | "buf=%u cur=%u bytes=%u", | |
843 | qc->nbytes, qc->curbytes, bytes); | |
844 | return -1; | |
845 | } | |
846 | ||
847 | page = sg_page(sg); | |
848 | offset = sg->offset + qc->cursg_ofs; | |
849 | ||
850 | /* get the current page and offset */ | |
851 | page = nth_page(page, (offset >> PAGE_SHIFT)); | |
852 | offset %= PAGE_SIZE; | |
853 | ||
854 | /* don't overrun current sg */ | |
855 | count = min(sg->length - qc->cursg_ofs, bytes); | |
856 | ||
857 | /* don't cross page boundaries */ | |
858 | count = min(count, (unsigned int)PAGE_SIZE - offset); | |
859 | ||
860 | DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); | |
861 | ||
862 | if (PageHighMem(page)) { | |
863 | unsigned long flags; | |
864 | ||
865 | /* FIXME: use bounce buffer */ | |
866 | local_irq_save(flags); | |
867 | buf = kmap_atomic(page, KM_IRQ0); | |
868 | ||
869 | /* do the actual data transfer */ | |
0fe40ff8 AC |
870 | consumed = ap->ops->sff_data_xfer(dev, buf + offset, |
871 | count, rw); | |
624d5c51 TH |
872 | |
873 | kunmap_atomic(buf, KM_IRQ0); | |
874 | local_irq_restore(flags); | |
875 | } else { | |
876 | buf = page_address(page); | |
0fe40ff8 AC |
877 | consumed = ap->ops->sff_data_xfer(dev, buf + offset, |
878 | count, rw); | |
624d5c51 TH |
879 | } |
880 | ||
881 | bytes -= min(bytes, consumed); | |
882 | qc->curbytes += count; | |
883 | qc->cursg_ofs += count; | |
884 | ||
885 | if (qc->cursg_ofs == sg->length) { | |
886 | qc->cursg = sg_next(qc->cursg); | |
887 | qc->cursg_ofs = 0; | |
888 | } | |
889 | ||
a0f79f7a CB |
890 | /* |
891 | * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); | |
892 | * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN | |
893 | * check correctly as it doesn't know if it is the last request being | |
894 | * made. Somebody should implement a proper sanity check. | |
895 | */ | |
624d5c51 TH |
896 | if (bytes) |
897 | goto next_sg; | |
898 | return 0; | |
899 | } | |
900 | ||
901 | /** | |
902 | * atapi_pio_bytes - Transfer data from/to the ATAPI device. | |
903 | * @qc: Command on going | |
904 | * | |
905 | * Transfer Transfer data from/to the ATAPI device. | |
906 | * | |
907 | * LOCKING: | |
908 | * Inherited from caller. | |
909 | */ | |
910 | static void atapi_pio_bytes(struct ata_queued_cmd *qc) | |
911 | { | |
912 | struct ata_port *ap = qc->ap; | |
913 | struct ata_device *dev = qc->dev; | |
914 | struct ata_eh_info *ehi = &dev->link->eh_info; | |
915 | unsigned int ireason, bc_lo, bc_hi, bytes; | |
916 | int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; | |
917 | ||
918 | /* Abuse qc->result_tf for temp storage of intermediate TF | |
919 | * here to save some kernel stack usage. | |
920 | * For normal completion, qc->result_tf is not relevant. For | |
921 | * error, qc->result_tf is later overwritten by ata_qc_complete(). | |
922 | * So, the correctness of qc->result_tf is not affected. | |
923 | */ | |
5682ed33 | 924 | ap->ops->sff_tf_read(ap, &qc->result_tf); |
624d5c51 TH |
925 | ireason = qc->result_tf.nsect; |
926 | bc_lo = qc->result_tf.lbam; | |
927 | bc_hi = qc->result_tf.lbah; | |
928 | bytes = (bc_hi << 8) | bc_lo; | |
929 | ||
930 | /* shall be cleared to zero, indicating xfer of data */ | |
931 | if (unlikely(ireason & (1 << 0))) | |
932 | goto atapi_check; | |
933 | ||
934 | /* make sure transfer direction matches expected */ | |
935 | i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; | |
936 | if (unlikely(do_write != i_write)) | |
937 | goto atapi_check; | |
938 | ||
939 | if (unlikely(!bytes)) | |
940 | goto atapi_check; | |
941 | ||
942 | VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); | |
943 | ||
944 | if (unlikely(__atapi_pio_bytes(qc, bytes))) | |
945 | goto err_out; | |
a57c1bad | 946 | ata_sff_sync(ap); /* flush */ |
624d5c51 TH |
947 | |
948 | return; | |
949 | ||
950 | atapi_check: | |
951 | ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", | |
952 | ireason, bytes); | |
953 | err_out: | |
954 | qc->err_mask |= AC_ERR_HSM; | |
955 | ap->hsm_task_state = HSM_ST_ERR; | |
956 | } | |
957 | ||
958 | /** | |
959 | * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. | |
960 | * @ap: the target ata_port | |
961 | * @qc: qc on going | |
962 | * | |
963 | * RETURNS: | |
964 | * 1 if ok in workqueue, 0 otherwise. | |
965 | */ | |
0fe40ff8 AC |
966 | static inline int ata_hsm_ok_in_wq(struct ata_port *ap, |
967 | struct ata_queued_cmd *qc) | |
624d5c51 TH |
968 | { |
969 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
970 | return 1; | |
971 | ||
972 | if (ap->hsm_task_state == HSM_ST_FIRST) { | |
973 | if (qc->tf.protocol == ATA_PROT_PIO && | |
0fe40ff8 | 974 | (qc->tf.flags & ATA_TFLAG_WRITE)) |
624d5c51 TH |
975 | return 1; |
976 | ||
977 | if (ata_is_atapi(qc->tf.protocol) && | |
0fe40ff8 | 978 | !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
624d5c51 TH |
979 | return 1; |
980 | } | |
981 | ||
982 | return 0; | |
983 | } | |
984 | ||
985 | /** | |
986 | * ata_hsm_qc_complete - finish a qc running on standard HSM | |
987 | * @qc: Command to complete | |
988 | * @in_wq: 1 if called from workqueue, 0 otherwise | |
989 | * | |
990 | * Finish @qc which is running on standard HSM. | |
991 | * | |
992 | * LOCKING: | |
993 | * If @in_wq is zero, spin_lock_irqsave(host lock). | |
994 | * Otherwise, none on entry and grabs host lock. | |
995 | */ | |
996 | static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | |
997 | { | |
998 | struct ata_port *ap = qc->ap; | |
999 | unsigned long flags; | |
1000 | ||
1001 | if (ap->ops->error_handler) { | |
1002 | if (in_wq) { | |
1003 | spin_lock_irqsave(ap->lock, flags); | |
1004 | ||
1005 | /* EH might have kicked in while host lock is | |
1006 | * released. | |
1007 | */ | |
1008 | qc = ata_qc_from_tag(ap, qc->tag); | |
1009 | if (qc) { | |
1010 | if (likely(!(qc->err_mask & AC_ERR_HSM))) { | |
e42a542b | 1011 | ata_sff_irq_on(ap); |
624d5c51 TH |
1012 | ata_qc_complete(qc); |
1013 | } else | |
1014 | ata_port_freeze(ap); | |
1015 | } | |
1016 | ||
1017 | spin_unlock_irqrestore(ap->lock, flags); | |
1018 | } else { | |
1019 | if (likely(!(qc->err_mask & AC_ERR_HSM))) | |
1020 | ata_qc_complete(qc); | |
1021 | else | |
1022 | ata_port_freeze(ap); | |
1023 | } | |
1024 | } else { | |
1025 | if (in_wq) { | |
1026 | spin_lock_irqsave(ap->lock, flags); | |
e42a542b | 1027 | ata_sff_irq_on(ap); |
624d5c51 TH |
1028 | ata_qc_complete(qc); |
1029 | spin_unlock_irqrestore(ap->lock, flags); | |
1030 | } else | |
1031 | ata_qc_complete(qc); | |
1032 | } | |
1033 | } | |
1034 | ||
1035 | /** | |
9363c382 | 1036 | * ata_sff_hsm_move - move the HSM to the next state. |
624d5c51 TH |
1037 | * @ap: the target ata_port |
1038 | * @qc: qc on going | |
1039 | * @status: current device status | |
1040 | * @in_wq: 1 if called from workqueue, 0 otherwise | |
1041 | * | |
1042 | * RETURNS: | |
1043 | * 1 when poll next status needed, 0 otherwise. | |
1044 | */ | |
9363c382 TH |
1045 | int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
1046 | u8 status, int in_wq) | |
624d5c51 | 1047 | { |
ea3c6450 GG |
1048 | struct ata_link *link = qc->dev->link; |
1049 | struct ata_eh_info *ehi = &link->eh_info; | |
624d5c51 TH |
1050 | unsigned long flags = 0; |
1051 | int poll_next; | |
1052 | ||
efcb3cf7 | 1053 | WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); |
624d5c51 | 1054 | |
9363c382 | 1055 | /* Make sure ata_sff_qc_issue() does not throw things |
624d5c51 TH |
1056 | * like DMA polling into the workqueue. Notice that |
1057 | * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). | |
1058 | */ | |
efcb3cf7 | 1059 | WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); |
624d5c51 TH |
1060 | |
1061 | fsm_start: | |
1062 | DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", | |
1063 | ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); | |
1064 | ||
1065 | switch (ap->hsm_task_state) { | |
1066 | case HSM_ST_FIRST: | |
1067 | /* Send first data block or PACKET CDB */ | |
1068 | ||
1069 | /* If polling, we will stay in the work queue after | |
1070 | * sending the data. Otherwise, interrupt handler | |
1071 | * takes over after sending the data. | |
1072 | */ | |
1073 | poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); | |
1074 | ||
1075 | /* check device status */ | |
1076 | if (unlikely((status & ATA_DRQ) == 0)) { | |
1077 | /* handle BSY=0, DRQ=0 as error */ | |
1078 | if (likely(status & (ATA_ERR | ATA_DF))) | |
1079 | /* device stops HSM for abort/error */ | |
1080 | qc->err_mask |= AC_ERR_DEV; | |
a836d3e8 | 1081 | else { |
624d5c51 | 1082 | /* HSM violation. Let EH handle this */ |
a836d3e8 TH |
1083 | ata_ehi_push_desc(ehi, |
1084 | "ST_FIRST: !(DRQ|ERR|DF)"); | |
624d5c51 | 1085 | qc->err_mask |= AC_ERR_HSM; |
a836d3e8 | 1086 | } |
624d5c51 TH |
1087 | |
1088 | ap->hsm_task_state = HSM_ST_ERR; | |
1089 | goto fsm_start; | |
1090 | } | |
1091 | ||
1092 | /* Device should not ask for data transfer (DRQ=1) | |
1093 | * when it finds something wrong. | |
1094 | * We ignore DRQ here and stop the HSM by | |
1095 | * changing hsm_task_state to HSM_ST_ERR and | |
1096 | * let the EH abort the command or reset the device. | |
1097 | */ | |
1098 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | |
1099 | /* Some ATAPI tape drives forget to clear the ERR bit | |
1100 | * when doing the next command (mostly request sense). | |
1101 | * We ignore ERR here to workaround and proceed sending | |
1102 | * the CDB. | |
1103 | */ | |
1104 | if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { | |
a836d3e8 TH |
1105 | ata_ehi_push_desc(ehi, "ST_FIRST: " |
1106 | "DRQ=1 with device error, " | |
1107 | "dev_stat 0x%X", status); | |
624d5c51 TH |
1108 | qc->err_mask |= AC_ERR_HSM; |
1109 | ap->hsm_task_state = HSM_ST_ERR; | |
1110 | goto fsm_start; | |
1111 | } | |
1112 | } | |
1113 | ||
1114 | /* Send the CDB (atapi) or the first data block (ata pio out). | |
1115 | * During the state transition, interrupt handler shouldn't | |
1116 | * be invoked before the data transfer is complete and | |
1117 | * hsm_task_state is changed. Hence, the following locking. | |
1118 | */ | |
1119 | if (in_wq) | |
1120 | spin_lock_irqsave(ap->lock, flags); | |
1121 | ||
1122 | if (qc->tf.protocol == ATA_PROT_PIO) { | |
1123 | /* PIO data out protocol. | |
1124 | * send first data block. | |
1125 | */ | |
1126 | ||
1127 | /* ata_pio_sectors() might change the state | |
1128 | * to HSM_ST_LAST. so, the state is changed here | |
1129 | * before ata_pio_sectors(). | |
1130 | */ | |
1131 | ap->hsm_task_state = HSM_ST; | |
1132 | ata_pio_sectors(qc); | |
1133 | } else | |
1134 | /* send CDB */ | |
1135 | atapi_send_cdb(ap, qc); | |
1136 | ||
1137 | if (in_wq) | |
1138 | spin_unlock_irqrestore(ap->lock, flags); | |
1139 | ||
c429137a | 1140 | /* if polling, ata_sff_pio_task() handles the rest. |
624d5c51 TH |
1141 | * otherwise, interrupt handler takes over from here. |
1142 | */ | |
1143 | break; | |
1144 | ||
1145 | case HSM_ST: | |
1146 | /* complete command or read/write the data register */ | |
1147 | if (qc->tf.protocol == ATAPI_PROT_PIO) { | |
1148 | /* ATAPI PIO protocol */ | |
1149 | if ((status & ATA_DRQ) == 0) { | |
1150 | /* No more data to transfer or device error. | |
1151 | * Device error will be tagged in HSM_ST_LAST. | |
1152 | */ | |
1153 | ap->hsm_task_state = HSM_ST_LAST; | |
1154 | goto fsm_start; | |
1155 | } | |
1156 | ||
1157 | /* Device should not ask for data transfer (DRQ=1) | |
1158 | * when it finds something wrong. | |
1159 | * We ignore DRQ here and stop the HSM by | |
1160 | * changing hsm_task_state to HSM_ST_ERR and | |
1161 | * let the EH abort the command or reset the device. | |
1162 | */ | |
1163 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | |
a836d3e8 TH |
1164 | ata_ehi_push_desc(ehi, "ST-ATAPI: " |
1165 | "DRQ=1 with device error, " | |
1166 | "dev_stat 0x%X", status); | |
624d5c51 TH |
1167 | qc->err_mask |= AC_ERR_HSM; |
1168 | ap->hsm_task_state = HSM_ST_ERR; | |
1169 | goto fsm_start; | |
1170 | } | |
1171 | ||
1172 | atapi_pio_bytes(qc); | |
1173 | ||
1174 | if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) | |
1175 | /* bad ireason reported by device */ | |
1176 | goto fsm_start; | |
1177 | ||
1178 | } else { | |
1179 | /* ATA PIO protocol */ | |
1180 | if (unlikely((status & ATA_DRQ) == 0)) { | |
1181 | /* handle BSY=0, DRQ=0 as error */ | |
6a6b97d3 | 1182 | if (likely(status & (ATA_ERR | ATA_DF))) { |
624d5c51 TH |
1183 | /* device stops HSM for abort/error */ |
1184 | qc->err_mask |= AC_ERR_DEV; | |
6a6b97d3 TH |
1185 | |
1186 | /* If diagnostic failed and this is | |
1187 | * IDENTIFY, it's likely a phantom | |
1188 | * device. Mark hint. | |
1189 | */ | |
1190 | if (qc->dev->horkage & | |
1191 | ATA_HORKAGE_DIAGNOSTIC) | |
1192 | qc->err_mask |= | |
1193 | AC_ERR_NODEV_HINT; | |
1194 | } else { | |
624d5c51 TH |
1195 | /* HSM violation. Let EH handle this. |
1196 | * Phantom devices also trigger this | |
1197 | * condition. Mark hint. | |
1198 | */ | |
a836d3e8 | 1199 | ata_ehi_push_desc(ehi, "ST-ATA: " |
80ee6f54 | 1200 | "DRQ=0 without device error, " |
a836d3e8 | 1201 | "dev_stat 0x%X", status); |
624d5c51 TH |
1202 | qc->err_mask |= AC_ERR_HSM | |
1203 | AC_ERR_NODEV_HINT; | |
a836d3e8 | 1204 | } |
624d5c51 TH |
1205 | |
1206 | ap->hsm_task_state = HSM_ST_ERR; | |
1207 | goto fsm_start; | |
1208 | } | |
1209 | ||
1210 | /* For PIO reads, some devices may ask for | |
1211 | * data transfer (DRQ=1) alone with ERR=1. | |
1212 | * We respect DRQ here and transfer one | |
1213 | * block of junk data before changing the | |
1214 | * hsm_task_state to HSM_ST_ERR. | |
1215 | * | |
1216 | * For PIO writes, ERR=1 DRQ=1 doesn't make | |
1217 | * sense since the data block has been | |
1218 | * transferred to the device. | |
1219 | */ | |
1220 | if (unlikely(status & (ATA_ERR | ATA_DF))) { | |
1221 | /* data might be corrputed */ | |
1222 | qc->err_mask |= AC_ERR_DEV; | |
1223 | ||
1224 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { | |
1225 | ata_pio_sectors(qc); | |
1226 | status = ata_wait_idle(ap); | |
1227 | } | |
1228 | ||
a836d3e8 TH |
1229 | if (status & (ATA_BUSY | ATA_DRQ)) { |
1230 | ata_ehi_push_desc(ehi, "ST-ATA: " | |
1231 | "BUSY|DRQ persists on ERR|DF, " | |
1232 | "dev_stat 0x%X", status); | |
624d5c51 | 1233 | qc->err_mask |= AC_ERR_HSM; |
a836d3e8 | 1234 | } |
624d5c51 | 1235 | |
b919930c TH |
1236 | /* There are oddball controllers with |
1237 | * status register stuck at 0x7f and | |
1238 | * lbal/m/h at zero which makes it | |
1239 | * pass all other presence detection | |
1240 | * mechanisms we have. Set NODEV_HINT | |
1241 | * for it. Kernel bz#7241. | |
1242 | */ | |
1243 | if (status == 0x7f) | |
1244 | qc->err_mask |= AC_ERR_NODEV_HINT; | |
1245 | ||
624d5c51 TH |
1246 | /* ata_pio_sectors() might change the |
1247 | * state to HSM_ST_LAST. so, the state | |
1248 | * is changed after ata_pio_sectors(). | |
1249 | */ | |
1250 | ap->hsm_task_state = HSM_ST_ERR; | |
1251 | goto fsm_start; | |
1252 | } | |
1253 | ||
1254 | ata_pio_sectors(qc); | |
1255 | ||
1256 | if (ap->hsm_task_state == HSM_ST_LAST && | |
1257 | (!(qc->tf.flags & ATA_TFLAG_WRITE))) { | |
1258 | /* all data read */ | |
1259 | status = ata_wait_idle(ap); | |
1260 | goto fsm_start; | |
1261 | } | |
1262 | } | |
1263 | ||
1264 | poll_next = 1; | |
1265 | break; | |
1266 | ||
1267 | case HSM_ST_LAST: | |
1268 | if (unlikely(!ata_ok(status))) { | |
1269 | qc->err_mask |= __ac_err_mask(status); | |
1270 | ap->hsm_task_state = HSM_ST_ERR; | |
1271 | goto fsm_start; | |
1272 | } | |
1273 | ||
1274 | /* no more data to transfer */ | |
1275 | DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", | |
1276 | ap->print_id, qc->dev->devno, status); | |
1277 | ||
efcb3cf7 | 1278 | WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); |
624d5c51 TH |
1279 | |
1280 | ap->hsm_task_state = HSM_ST_IDLE; | |
1281 | ||
1282 | /* complete taskfile transaction */ | |
1283 | ata_hsm_qc_complete(qc, in_wq); | |
1284 | ||
1285 | poll_next = 0; | |
1286 | break; | |
1287 | ||
1288 | case HSM_ST_ERR: | |
624d5c51 TH |
1289 | ap->hsm_task_state = HSM_ST_IDLE; |
1290 | ||
1291 | /* complete taskfile transaction */ | |
1292 | ata_hsm_qc_complete(qc, in_wq); | |
1293 | ||
1294 | poll_next = 0; | |
1295 | break; | |
1296 | default: | |
1297 | poll_next = 0; | |
1298 | BUG(); | |
1299 | } | |
1300 | ||
1301 | return poll_next; | |
1302 | } | |
0fe40ff8 | 1303 | EXPORT_SYMBOL_GPL(ata_sff_hsm_move); |
624d5c51 | 1304 | |
ea3c6450 | 1305 | void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay) |
c429137a | 1306 | { |
ea3c6450 GG |
1307 | struct ata_port *ap = link->ap; |
1308 | ||
1309 | WARN_ON((ap->sff_pio_task_link != NULL) && | |
1310 | (ap->sff_pio_task_link != link)); | |
1311 | ap->sff_pio_task_link = link; | |
1312 | ||
c429137a TH |
1313 | /* may fail if ata_sff_flush_pio_task() in progress */ |
1314 | queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, | |
1315 | msecs_to_jiffies(delay)); | |
1316 | } | |
1317 | EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); | |
1318 | ||
1319 | void ata_sff_flush_pio_task(struct ata_port *ap) | |
1320 | { | |
1321 | DPRINTK("ENTER\n"); | |
1322 | ||
afe2c511 | 1323 | cancel_delayed_work_sync(&ap->sff_pio_task); |
c429137a TH |
1324 | ap->hsm_task_state = HSM_ST_IDLE; |
1325 | ||
1326 | if (ata_msg_ctl(ap)) | |
1327 | ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); | |
1328 | } | |
1329 | ||
1330 | static void ata_sff_pio_task(struct work_struct *work) | |
624d5c51 TH |
1331 | { |
1332 | struct ata_port *ap = | |
c429137a | 1333 | container_of(work, struct ata_port, sff_pio_task.work); |
ea3c6450 | 1334 | struct ata_link *link = ap->sff_pio_task_link; |
c429137a | 1335 | struct ata_queued_cmd *qc; |
624d5c51 TH |
1336 | u8 status; |
1337 | int poll_next; | |
1338 | ||
ea3c6450 | 1339 | BUG_ON(ap->sff_pio_task_link == NULL); |
c429137a | 1340 | /* qc can be NULL if timeout occurred */ |
ea3c6450 GG |
1341 | qc = ata_qc_from_tag(ap, link->active_tag); |
1342 | if (!qc) { | |
1343 | ap->sff_pio_task_link = NULL; | |
c429137a | 1344 | return; |
ea3c6450 | 1345 | } |
c429137a | 1346 | |
624d5c51 | 1347 | fsm_start: |
efcb3cf7 | 1348 | WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); |
624d5c51 TH |
1349 | |
1350 | /* | |
1351 | * This is purely heuristic. This is a fast path. | |
1352 | * Sometimes when we enter, BSY will be cleared in | |
1353 | * a chk-status or two. If not, the drive is probably seeking | |
1354 | * or something. Snooze for a couple msecs, then | |
1355 | * chk-status again. If still busy, queue delayed work. | |
1356 | */ | |
9363c382 | 1357 | status = ata_sff_busy_wait(ap, ATA_BUSY, 5); |
624d5c51 | 1358 | if (status & ATA_BUSY) { |
97750ceb | 1359 | ata_msleep(ap, 2); |
9363c382 | 1360 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); |
624d5c51 | 1361 | if (status & ATA_BUSY) { |
ea3c6450 | 1362 | ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); |
624d5c51 TH |
1363 | return; |
1364 | } | |
1365 | } | |
1366 | ||
ea3c6450 GG |
1367 | /* |
1368 | * hsm_move() may trigger another command to be processed. | |
1369 | * clean the link beforehand. | |
1370 | */ | |
1371 | ap->sff_pio_task_link = NULL; | |
624d5c51 | 1372 | /* move the HSM */ |
9363c382 | 1373 | poll_next = ata_sff_hsm_move(ap, qc, status, 1); |
624d5c51 TH |
1374 | |
1375 | /* another command or interrupt handler | |
1376 | * may be running at this point. | |
1377 | */ | |
1378 | if (poll_next) | |
1379 | goto fsm_start; | |
1380 | } | |
1381 | ||
1382 | /** | |
360ff783 | 1383 | * ata_sff_qc_issue - issue taskfile to a SFF controller |
624d5c51 TH |
1384 | * @qc: command to issue to device |
1385 | * | |
360ff783 TH |
1386 | * This function issues a PIO or NODATA command to a SFF |
1387 | * controller. | |
624d5c51 TH |
1388 | * |
1389 | * LOCKING: | |
1390 | * spin_lock_irqsave(host lock) | |
1391 | * | |
1392 | * RETURNS: | |
1393 | * Zero on success, AC_ERR_* mask on failure | |
1394 | */ | |
9363c382 | 1395 | unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) |
624d5c51 TH |
1396 | { |
1397 | struct ata_port *ap = qc->ap; | |
ea3c6450 | 1398 | struct ata_link *link = qc->dev->link; |
624d5c51 TH |
1399 | |
1400 | /* Use polling pio if the LLD doesn't handle | |
1401 | * interrupt driven pio and atapi CDB interrupt. | |
1402 | */ | |
360ff783 TH |
1403 | if (ap->flags & ATA_FLAG_PIO_POLLING) |
1404 | qc->tf.flags |= ATA_TFLAG_POLLING; | |
624d5c51 TH |
1405 | |
1406 | /* select the device */ | |
1407 | ata_dev_select(ap, qc->dev->devno, 1, 0); | |
1408 | ||
1409 | /* start the command */ | |
1410 | switch (qc->tf.protocol) { | |
1411 | case ATA_PROT_NODATA: | |
1412 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
1413 | ata_qc_set_polling(qc); | |
1414 | ||
1415 | ata_tf_to_host(ap, &qc->tf); | |
1416 | ap->hsm_task_state = HSM_ST_LAST; | |
1417 | ||
1418 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
ea3c6450 | 1419 | ata_sff_queue_pio_task(link, 0); |
624d5c51 TH |
1420 | |
1421 | break; | |
1422 | ||
624d5c51 TH |
1423 | case ATA_PROT_PIO: |
1424 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
1425 | ata_qc_set_polling(qc); | |
1426 | ||
1427 | ata_tf_to_host(ap, &qc->tf); | |
1428 | ||
1429 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | |
1430 | /* PIO data out protocol */ | |
1431 | ap->hsm_task_state = HSM_ST_FIRST; | |
ea3c6450 | 1432 | ata_sff_queue_pio_task(link, 0); |
624d5c51 | 1433 | |
c429137a TH |
1434 | /* always send first data block using the |
1435 | * ata_sff_pio_task() codepath. | |
624d5c51 TH |
1436 | */ |
1437 | } else { | |
1438 | /* PIO data in protocol */ | |
1439 | ap->hsm_task_state = HSM_ST; | |
1440 | ||
1441 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
ea3c6450 | 1442 | ata_sff_queue_pio_task(link, 0); |
624d5c51 | 1443 | |
c429137a TH |
1444 | /* if polling, ata_sff_pio_task() handles the |
1445 | * rest. otherwise, interrupt handler takes | |
1446 | * over from here. | |
624d5c51 TH |
1447 | */ |
1448 | } | |
1449 | ||
1450 | break; | |
1451 | ||
1452 | case ATAPI_PROT_PIO: | |
1453 | case ATAPI_PROT_NODATA: | |
1454 | if (qc->tf.flags & ATA_TFLAG_POLLING) | |
1455 | ata_qc_set_polling(qc); | |
1456 | ||
1457 | ata_tf_to_host(ap, &qc->tf); | |
1458 | ||
1459 | ap->hsm_task_state = HSM_ST_FIRST; | |
1460 | ||
1461 | /* send cdb by polling if no cdb interrupt */ | |
1462 | if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || | |
1463 | (qc->tf.flags & ATA_TFLAG_POLLING)) | |
ea3c6450 | 1464 | ata_sff_queue_pio_task(link, 0); |
624d5c51 TH |
1465 | break; |
1466 | ||
624d5c51 | 1467 | default: |
efcb3cf7 | 1468 | WARN_ON_ONCE(1); |
624d5c51 TH |
1469 | return AC_ERR_SYSTEM; |
1470 | } | |
1471 | ||
1472 | return 0; | |
1473 | } | |
0fe40ff8 | 1474 | EXPORT_SYMBOL_GPL(ata_sff_qc_issue); |
624d5c51 | 1475 | |
22183bf5 TH |
1476 | /** |
1477 | * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read | |
1478 | * @qc: qc to fill result TF for | |
1479 | * | |
1480 | * @qc is finished and result TF needs to be filled. Fill it | |
1481 | * using ->sff_tf_read. | |
1482 | * | |
1483 | * LOCKING: | |
1484 | * spin_lock_irqsave(host lock) | |
1485 | * | |
1486 | * RETURNS: | |
1487 | * true indicating that result TF is successfully filled. | |
1488 | */ | |
1489 | bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) | |
1490 | { | |
1491 | qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); | |
1492 | return true; | |
1493 | } | |
0fe40ff8 | 1494 | EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); |
22183bf5 | 1495 | |
c3b28894 | 1496 | static unsigned int ata_sff_idle_irq(struct ata_port *ap) |
624d5c51 | 1497 | { |
c3b28894 TH |
1498 | ap->stats.idle_irq++; |
1499 | ||
1500 | #ifdef ATA_IRQ_TRAP | |
1501 | if ((ap->stats.idle_irq % 1000) == 0) { | |
1502 | ap->ops->sff_check_status(ap); | |
1503 | if (ap->ops->sff_irq_clear) | |
1504 | ap->ops->sff_irq_clear(ap); | |
1505 | ata_port_printk(ap, KERN_WARNING, "irq trap\n"); | |
1506 | return 1; | |
1507 | } | |
1508 | #endif | |
1509 | return 0; /* irq not handled */ | |
1510 | } | |
1511 | ||
1512 | static unsigned int __ata_sff_port_intr(struct ata_port *ap, | |
1513 | struct ata_queued_cmd *qc, | |
1514 | bool hsmv_on_idle) | |
1515 | { | |
1516 | u8 status; | |
624d5c51 TH |
1517 | |
1518 | VPRINTK("ata%u: protocol %d task_state %d\n", | |
1519 | ap->print_id, qc->tf.protocol, ap->hsm_task_state); | |
1520 | ||
1521 | /* Check whether we are expecting interrupt in this state */ | |
1522 | switch (ap->hsm_task_state) { | |
1523 | case HSM_ST_FIRST: | |
1524 | /* Some pre-ATAPI-4 devices assert INTRQ | |
1525 | * at this state when ready to receive CDB. | |
1526 | */ | |
1527 | ||
1528 | /* Check the ATA_DFLAG_CDB_INTR flag is enough here. | |
1529 | * The flag was turned on only for atapi devices. No | |
1530 | * need to check ata_is_atapi(qc->tf.protocol) again. | |
1531 | */ | |
1532 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | |
c3b28894 | 1533 | return ata_sff_idle_irq(ap); |
624d5c51 | 1534 | break; |
687a9933 | 1535 | case HSM_ST_IDLE: |
c3b28894 | 1536 | return ata_sff_idle_irq(ap); |
687a9933 TH |
1537 | default: |
1538 | break; | |
624d5c51 TH |
1539 | } |
1540 | ||
a57c1bad AC |
1541 | /* check main status, clearing INTRQ if needed */ |
1542 | status = ata_sff_irq_status(ap); | |
332ac7ff | 1543 | if (status & ATA_BUSY) { |
c3b28894 | 1544 | if (hsmv_on_idle) { |
332ac7ff TH |
1545 | /* BMDMA engine is already stopped, we're screwed */ |
1546 | qc->err_mask |= AC_ERR_HSM; | |
1547 | ap->hsm_task_state = HSM_ST_ERR; | |
1548 | } else | |
c3b28894 | 1549 | return ata_sff_idle_irq(ap); |
332ac7ff | 1550 | } |
624d5c51 | 1551 | |
9f2f7210 | 1552 | /* clear irq events */ |
37f65b8b TH |
1553 | if (ap->ops->sff_irq_clear) |
1554 | ap->ops->sff_irq_clear(ap); | |
624d5c51 | 1555 | |
9363c382 | 1556 | ata_sff_hsm_move(ap, qc, status, 0); |
624d5c51 | 1557 | |
624d5c51 | 1558 | return 1; /* irq handled */ |
624d5c51 TH |
1559 | } |
1560 | ||
1561 | /** | |
c3b28894 TH |
1562 | * ata_sff_port_intr - Handle SFF port interrupt |
1563 | * @ap: Port on which interrupt arrived (possibly...) | |
1564 | * @qc: Taskfile currently active in engine | |
624d5c51 | 1565 | * |
c3b28894 | 1566 | * Handle port interrupt for given queued command. |
624d5c51 TH |
1567 | * |
1568 | * LOCKING: | |
c3b28894 | 1569 | * spin_lock_irqsave(host lock) |
624d5c51 TH |
1570 | * |
1571 | * RETURNS: | |
c3b28894 | 1572 | * One if interrupt was handled, zero if not (shared irq). |
624d5c51 | 1573 | */ |
c3b28894 TH |
1574 | unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
1575 | { | |
1576 | return __ata_sff_port_intr(ap, qc, false); | |
1577 | } | |
1578 | EXPORT_SYMBOL_GPL(ata_sff_port_intr); | |
1579 | ||
1580 | static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, | |
1581 | unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) | |
624d5c51 TH |
1582 | { |
1583 | struct ata_host *host = dev_instance; | |
332ac7ff | 1584 | bool retried = false; |
624d5c51 | 1585 | unsigned int i; |
332ac7ff | 1586 | unsigned int handled, idle, polling; |
624d5c51 TH |
1587 | unsigned long flags; |
1588 | ||
1589 | /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ | |
1590 | spin_lock_irqsave(&host->lock, flags); | |
1591 | ||
332ac7ff TH |
1592 | retry: |
1593 | handled = idle = polling = 0; | |
624d5c51 | 1594 | for (i = 0; i < host->n_ports; i++) { |
d88ec2e5 TH |
1595 | struct ata_port *ap = host->ports[i]; |
1596 | struct ata_queued_cmd *qc; | |
624d5c51 | 1597 | |
d88ec2e5 | 1598 | qc = ata_qc_from_tag(ap, ap->link.active_tag); |
27943620 TH |
1599 | if (qc) { |
1600 | if (!(qc->tf.flags & ATA_TFLAG_POLLING)) | |
c3b28894 | 1601 | handled |= port_intr(ap, qc); |
27943620 TH |
1602 | else |
1603 | polling |= 1 << i; | |
332ac7ff TH |
1604 | } else |
1605 | idle |= 1 << i; | |
27943620 TH |
1606 | } |
1607 | ||
1608 | /* | |
1609 | * If no port was expecting an IRQ but the controller is actually |
1610 | * asserting the IRQ line, a "nobody cared" warning will ensue. |
1611 | * Check IRQ pending status if available and clear the spurious IRQ. |
1612 | */ | |
332ac7ff TH |
1613 | if (!handled && !retried) { |
1614 | bool retry = false; | |
1615 | ||
27943620 TH |
1616 | for (i = 0; i < host->n_ports; i++) { |
1617 | struct ata_port *ap = host->ports[i]; | |
1618 | ||
1619 | if (polling & (1 << i)) | |
1620 | continue; | |
1621 | ||
1622 | if (!ap->ops->sff_irq_check || | |
1623 | !ap->ops->sff_irq_check(ap)) | |
1624 | continue; | |
1625 | ||
332ac7ff TH |
1626 | if (idle & (1 << i)) { |
1627 | ap->ops->sff_check_status(ap); | |
37f65b8b TH |
1628 | if (ap->ops->sff_irq_clear) |
1629 | ap->ops->sff_irq_clear(ap); | |
332ac7ff TH |
1630 | } else { |
1631 | /* clear INTRQ and check if BUSY cleared */ | |
1632 | if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) | |
1633 | retry |= true; | |
1634 | /* | |
1635 | * With command in flight, we can't do | |
1636 | * sff_irq_clear() w/o racing with completion. | |
1637 | */ | |
1638 | } | |
1639 | } | |
1640 | ||
1641 | if (retry) { | |
1642 | retried = true; | |
1643 | goto retry; | |
27943620 | 1644 | } |
624d5c51 TH |
1645 | } |
1646 | ||
1647 | spin_unlock_irqrestore(&host->lock, flags); | |
1648 | ||
1649 | return IRQ_RETVAL(handled); | |
1650 | } | |
c3b28894 TH |
1651 | |
1652 | /** | |
1653 | * ata_sff_interrupt - Default SFF ATA host interrupt handler | |
1654 | * @irq: irq line (unused) | |
1655 | * @dev_instance: pointer to our ata_host information structure | |
1656 | * | |
1657 | * Default interrupt handler for PCI IDE devices. Calls | |
1658 | * ata_sff_port_intr() for each port that is not disabled. | |
1659 | * | |
1660 | * LOCKING: | |
1661 | * Obtains host lock during operation. | |
1662 | * | |
1663 | * RETURNS: | |
1664 | * IRQ_NONE or IRQ_HANDLED. | |
1665 | */ | |
1666 | irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) | |
1667 | { | |
1668 | return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); | |
1669 | } | |
0fe40ff8 | 1670 | EXPORT_SYMBOL_GPL(ata_sff_interrupt); |
624d5c51 | 1671 | |
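/*
 * Illustrative sketch, not part of libata-sff.c: a hypothetical non-PCI
 * driver would typically hand ata_sff_interrupt to the generic host
 * activation helper.  The function name and wiring below are assumptions
 * made for the example only.
 */
static int pata_pio_example_activate(struct ata_host *host, int irq,
				     struct scsi_host_template *sht)
{
	/* request @irq shared and register the host; from then on
	 * ata_sff_interrupt() fans each IRQ out to ata_sff_port_intr()
	 * for every port with a non-polling command in flight */
	return ata_host_activate(host, irq, ata_sff_interrupt,
				 IRQF_SHARED, sht);
}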
c96f1732 AC |
1672 | /** |
1673 | * ata_sff_lost_interrupt - Check for an apparent lost interrupt | |
1674 | * @ap: port that appears to have timed out | |
1675 | * | |
1676 | * Called from the libata error handlers when the core code suspects | |
1677 | * an interrupt has been lost. If it has, complete anything we can |
1678 | * and then return. The interface must support altstatus for this |
1679 | * faster recovery to occur. |
1680 | * | |
1681 | * Locking: | |
1682 | * Caller holds host lock | |
1683 | */ | |
1684 | ||
1685 | void ata_sff_lost_interrupt(struct ata_port *ap) | |
1686 | { | |
1687 | u8 status; | |
1688 | struct ata_queued_cmd *qc; | |
1689 | ||
1690 | /* Only one outstanding command per SFF channel */ | |
1691 | qc = ata_qc_from_tag(ap, ap->link.active_tag); | |
3e4ec344 TH |
1692 | /* We cannot lose an interrupt on a non-existent or polled command */ |
1693 | if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) | |
c96f1732 AC |
1694 | return; |
1695 | /* See if the controller thinks it is still busy - if so the command | |
1696 | isn't a lost IRQ but is still in progress */ | |
1697 | status = ata_sff_altstatus(ap); | |
1698 | if (status & ATA_BUSY) | |
1699 | return; | |
1700 | ||
1701 | /* There was a command running, we are no longer busy and we have | |
1702 | no interrupt. */ | |
1703 | ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n", | |
1704 | status); | |
1705 | /* Run the host interrupt logic as if the interrupt had not been | |
1706 | lost */ | |
c3b28894 | 1707 | ata_sff_port_intr(ap, qc); |
c96f1732 AC |
1708 | } |
1709 | EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); | |
1710 | ||
624d5c51 | 1711 | /** |
9363c382 | 1712 | * ata_sff_freeze - Freeze SFF controller port |
624d5c51 TH |
1713 | * @ap: port to freeze |
1714 | * | |
9f2f7210 | 1715 | * Freeze SFF controller port. |
624d5c51 TH |
1716 | * |
1717 | * LOCKING: | |
1718 | * Inherited from caller. | |
1719 | */ | |
9363c382 | 1720 | void ata_sff_freeze(struct ata_port *ap) |
624d5c51 | 1721 | { |
624d5c51 TH |
1722 | ap->ctl |= ATA_NIEN; |
1723 | ap->last_ctl = ap->ctl; | |
1724 | ||
41dec29b SS |
1725 | if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) |
1726 | ata_sff_set_devctl(ap, ap->ctl); | |
624d5c51 TH |
1727 | |
1728 | /* Under certain circumstances, some controllers raise IRQ on | |
1729 | * ATA_NIEN manipulation. Also, many controllers fail to mask | |
1730 | * previously pending IRQ on ATA_NIEN assertion. Clear it. | |
1731 | */ | |
5682ed33 | 1732 | ap->ops->sff_check_status(ap); |
624d5c51 | 1733 | |
37f65b8b TH |
1734 | if (ap->ops->sff_irq_clear) |
1735 | ap->ops->sff_irq_clear(ap); | |
624d5c51 | 1736 | } |
0fe40ff8 | 1737 | EXPORT_SYMBOL_GPL(ata_sff_freeze); |
624d5c51 TH |
1738 | |
1739 | /** | |
9363c382 | 1740 | * ata_sff_thaw - Thaw SFF controller port |
624d5c51 TH |
1741 | * @ap: port to thaw |
1742 | * | |
9363c382 | 1743 | * Thaw SFF controller port. |
624d5c51 TH |
1744 | * |
1745 | * LOCKING: | |
1746 | * Inherited from caller. | |
1747 | */ | |
9363c382 | 1748 | void ata_sff_thaw(struct ata_port *ap) |
272f7884 | 1749 | { |
624d5c51 | 1750 | /* clear & re-enable interrupts */ |
5682ed33 | 1751 | ap->ops->sff_check_status(ap); |
37f65b8b TH |
1752 | if (ap->ops->sff_irq_clear) |
1753 | ap->ops->sff_irq_clear(ap); | |
e42a542b | 1754 | ata_sff_irq_on(ap); |
272f7884 | 1755 | } |
0fe40ff8 | 1756 | EXPORT_SYMBOL_GPL(ata_sff_thaw); |
272f7884 | 1757 | |
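/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * low-level driver with its own interrupt mask register can layer
 * chip-specific masking on top of the stock freeze/thaw callbacks.
 * EXAMPLE_IRQ_MASK and the iomap[0] layout are hypothetical.
 */
static void example_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[0];	/* assumed layout */

	/* mask the chip's own interrupt source first ... */
	iowrite32(0, mmio + EXAMPLE_IRQ_MASK);
	/* ... then do the standard SFF ATA_NIEN/status/irq_clear dance */
	ata_sff_freeze(ap);
}

static void example_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[0];

	ata_sff_thaw(ap);
	iowrite32(~0, mmio + EXAMPLE_IRQ_MASK);
}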
0aa1113d TH |
1758 | /** |
1759 | * ata_sff_prereset - prepare SFF link for reset | |
1760 | * @link: SFF link to be reset | |
1761 | * @deadline: deadline jiffies for the operation | |
1762 | * | |
1763 | * SFF link @link is about to be reset. Initialize it. It first | |
1764 | * calls ata_std_prereset() and then waits for !BSY if the port is |
1765 | * being softreset. | |
1766 | * | |
1767 | * LOCKING: | |
1768 | * Kernel thread context (may sleep) | |
1769 | * | |
1770 | * RETURNS: | |
1771 | * 0 on success, -errno otherwise. | |
1772 | */ | |
1773 | int ata_sff_prereset(struct ata_link *link, unsigned long deadline) | |
1774 | { | |
0aa1113d TH |
1775 | struct ata_eh_context *ehc = &link->eh_context; |
1776 | int rc; | |
1777 | ||
1778 | rc = ata_std_prereset(link, deadline); | |
1779 | if (rc) | |
1780 | return rc; | |
1781 | ||
1782 | /* if we're about to do hardreset, nothing more to do */ | |
1783 | if (ehc->i.action & ATA_EH_HARDRESET) | |
1784 | return 0; | |
1785 | ||
1786 | /* wait for !BSY if we don't know that no device is attached */ | |
1787 | if (!ata_link_offline(link)) { | |
705e76be | 1788 | rc = ata_sff_wait_ready(link, deadline); |
0aa1113d TH |
1789 | if (rc && rc != -ENODEV) { |
1790 | ata_link_printk(link, KERN_WARNING, "device not ready " | |
1791 | "(errno=%d), forcing hardreset\n", rc); | |
1792 | ehc->i.action |= ATA_EH_HARDRESET; | |
1793 | } | |
1794 | } | |
1795 | ||
1796 | return 0; | |
1797 | } | |
0fe40ff8 | 1798 | EXPORT_SYMBOL_GPL(ata_sff_prereset); |
0aa1113d | 1799 | |
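/*
 * Illustrative sketch (hypothetical): drivers whose interface needs a
 * chip-specific step before the taskfile can be touched usually wrap
 * ata_sff_prereset() rather than replace it.  example_enable_iface()
 * is an assumed helper.
 */
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
	/* bring the interface out of its quiesced state (chip-specific,
	 * assumed) before the generic prereset polls the status register */
	example_enable_iface(link->ap);

	return ata_sff_prereset(link, deadline);
}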
90088bb4 | 1800 | /** |
624d5c51 TH |
1801 | * ata_devchk - PATA device presence detection |
1802 | * @ap: ATA channel to examine | |
1803 | * @device: Device to examine (starting at zero) | |
90088bb4 | 1804 | * |
624d5c51 TH |
1805 | * This technique was originally described in |
1806 | * Hale Landis's ATADRVR (www.ata-atapi.com), and | |
1807 | * later found its way into the ATA/ATAPI spec. | |
1808 | * | |
1809 | * Write a pattern to the ATA shadow registers, | |
1810 | * and if a device is present, it will respond by | |
1811 | * correctly storing and echoing back the | |
1812 | * ATA shadow register contents. | |
90088bb4 TH |
1813 | * |
1814 | * LOCKING: | |
624d5c51 | 1815 | * caller. |
90088bb4 | 1816 | */ |
624d5c51 | 1817 | static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) |
90088bb4 TH |
1818 | { |
1819 | struct ata_ioports *ioaddr = &ap->ioaddr; | |
624d5c51 | 1820 | u8 nsect, lbal; |
90088bb4 | 1821 | |
5682ed33 | 1822 | ap->ops->sff_dev_select(ap, device); |
90088bb4 | 1823 | |
624d5c51 TH |
1824 | iowrite8(0x55, ioaddr->nsect_addr); |
1825 | iowrite8(0xaa, ioaddr->lbal_addr); | |
90088bb4 | 1826 | |
624d5c51 TH |
1827 | iowrite8(0xaa, ioaddr->nsect_addr); |
1828 | iowrite8(0x55, ioaddr->lbal_addr); | |
90088bb4 | 1829 | |
624d5c51 TH |
1830 | iowrite8(0x55, ioaddr->nsect_addr); |
1831 | iowrite8(0xaa, ioaddr->lbal_addr); | |
1832 | ||
1833 | nsect = ioread8(ioaddr->nsect_addr); | |
1834 | lbal = ioread8(ioaddr->lbal_addr); | |
1835 | ||
1836 | if ((nsect == 0x55) && (lbal == 0xaa)) | |
1837 | return 1; /* we found a device */ | |
1838 | ||
1839 | return 0; /* nothing found */ | |
90088bb4 TH |
1840 | } |
1841 | ||
272f7884 | 1842 | /** |
9363c382 | 1843 | * ata_sff_dev_classify - Parse returned ATA device signature |
624d5c51 TH |
1844 | * @dev: ATA device to classify (starting at zero) |
1845 | * @present: device seems present | |
1846 | * @r_err: Value of error register on completion | |
272f7884 | 1847 | * |
624d5c51 TH |
1848 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, |
1849 | * an ATA/ATAPI-defined set of values is placed in the ATA | |
1850 | * shadow registers, indicating the results of device detection | |
1851 | * and diagnostics. | |
272f7884 | 1852 | * |
624d5c51 TH |
1853 | * Select the ATA device, and read the values from the ATA shadow |
1854 | * registers. Then parse according to the Error register value, | |
1855 | * and the spec-defined values examined by ata_dev_classify(). | |
272f7884 TH |
1856 | * |
1857 | * LOCKING: | |
624d5c51 TH |
1858 | * caller. |
1859 | * | |
1860 | * RETURNS: | |
1861 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. | |
272f7884 | 1862 | */ |
9363c382 | 1863 | unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, |
624d5c51 | 1864 | u8 *r_err) |
272f7884 | 1865 | { |
624d5c51 TH |
1866 | struct ata_port *ap = dev->link->ap; |
1867 | struct ata_taskfile tf; | |
1868 | unsigned int class; | |
1869 | u8 err; | |
1870 | ||
5682ed33 | 1871 | ap->ops->sff_dev_select(ap, dev->devno); |
624d5c51 TH |
1872 | |
1873 | memset(&tf, 0, sizeof(tf)); | |
1874 | ||
5682ed33 | 1875 | ap->ops->sff_tf_read(ap, &tf); |
624d5c51 TH |
1876 | err = tf.feature; |
1877 | if (r_err) | |
1878 | *r_err = err; | |
1879 | ||
1880 | /* see if device passed diags: continue and warn later */ | |
1881 | if (err == 0) | |
1882 | /* diagnostic fail : do nothing _YET_ */ | |
1883 | dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; | |
1884 | else if (err == 1) | |
1885 | /* do nothing */ ; | |
1886 | else if ((dev->devno == 0) && (err == 0x81)) | |
1887 | /* do nothing */ ; | |
1888 | else | |
1889 | return ATA_DEV_NONE; | |
272f7884 | 1890 | |
624d5c51 TH |
1891 | /* determine if device is ATA or ATAPI */ |
1892 | class = ata_dev_classify(&tf); | |
272f7884 | 1893 | |
624d5c51 TH |
1894 | if (class == ATA_DEV_UNKNOWN) { |
1895 | /* If the device failed diagnostic, it's likely to | |
1896 | * have reported incorrect device signature too. | |
1897 | * Assume ATA device if the device seems present but | |
1898 | * device signature is invalid with diagnostic | |
1899 | * failure. | |
1900 | */ | |
1901 | if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) | |
1902 | class = ATA_DEV_ATA; | |
1903 | else | |
1904 | class = ATA_DEV_NONE; | |
5682ed33 TH |
1905 | } else if ((class == ATA_DEV_ATA) && |
1906 | (ap->ops->sff_check_status(ap) == 0)) | |
624d5c51 TH |
1907 | class = ATA_DEV_NONE; |
1908 | ||
1909 | return class; | |
272f7884 | 1910 | } |
0fe40ff8 | 1911 | EXPORT_SYMBOL_GPL(ata_sff_dev_classify); |
272f7884 | 1912 | |
705e76be TH |
1913 | /** |
1914 | * ata_sff_wait_after_reset - wait for devices to become ready after reset | |
1915 | * @link: SFF link which is just reset | |
1916 | * @devmask: mask of present devices | |
1917 | * @deadline: deadline jiffies for the operation | |
1918 | * | |
1919 | * Wait for devices attached to SFF @link to become ready after |
1920 | * reset. It includes a preceding 150ms wait to avoid accessing the |
1921 | * TF status register too early. |
1922 | * | |
1923 | * LOCKING: | |
1924 | * Kernel thread context (may sleep). | |
1925 | * | |
1926 | * RETURNS: | |
1927 | * 0 on success, -ENODEV if some or all of devices in @devmask | |
1928 | * don't seem to exist. -errno on other errors. | |
1929 | */ | |
1930 | int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, | |
1931 | unsigned long deadline) | |
1fdffbce | 1932 | { |
705e76be | 1933 | struct ata_port *ap = link->ap; |
1fdffbce | 1934 | struct ata_ioports *ioaddr = &ap->ioaddr; |
624d5c51 TH |
1935 | unsigned int dev0 = devmask & (1 << 0); |
1936 | unsigned int dev1 = devmask & (1 << 1); | |
1937 | int rc, ret = 0; | |
1fdffbce | 1938 | |
97750ceb | 1939 | ata_msleep(ap, ATA_WAIT_AFTER_RESET); |
705e76be TH |
1940 | |
1941 | /* always check readiness of the master device */ | |
1942 | rc = ata_sff_wait_ready(link, deadline); | |
1943 | /* -ENODEV means the odd clown forgot the D7 pulldown resistor | |
1944 | * and TF status is 0xff, bail out on it too. | |
624d5c51 | 1945 | */ |
705e76be TH |
1946 | if (rc) |
1947 | return rc; | |
1fdffbce | 1948 | |
624d5c51 TH |
1949 | /* if device 1 was found in ata_devchk, wait for register |
1950 | * access briefly, then wait for BSY to clear. | |
1951 | */ | |
1952 | if (dev1) { | |
1953 | int i; | |
1fdffbce | 1954 | |
5682ed33 | 1955 | ap->ops->sff_dev_select(ap, 1); |
1fdffbce | 1956 | |
624d5c51 TH |
1957 | /* Wait for register access. Some ATAPI devices fail |
1958 | * to set nsect/lbal after reset, so don't waste too | |
1959 | * much time on it. We're gonna wait for !BSY anyway. | |
1960 | */ | |
1961 | for (i = 0; i < 2; i++) { | |
1962 | u8 nsect, lbal; | |
1963 | ||
1964 | nsect = ioread8(ioaddr->nsect_addr); | |
1965 | lbal = ioread8(ioaddr->lbal_addr); | |
1966 | if ((nsect == 1) && (lbal == 1)) | |
1967 | break; | |
97750ceb | 1968 | ata_msleep(ap, 50); /* give drive a breather */ |
624d5c51 TH |
1969 | } |
1970 | ||
705e76be | 1971 | rc = ata_sff_wait_ready(link, deadline); |
624d5c51 TH |
1972 | if (rc) { |
1973 | if (rc != -ENODEV) | |
1974 | return rc; | |
1975 | ret = rc; | |
1976 | } | |
1fdffbce JG |
1977 | } |
1978 | ||
624d5c51 | 1979 | /* is all this really necessary? */ |
5682ed33 | 1980 | ap->ops->sff_dev_select(ap, 0); |
624d5c51 | 1981 | if (dev1) |
5682ed33 | 1982 | ap->ops->sff_dev_select(ap, 1); |
624d5c51 | 1983 | if (dev0) |
5682ed33 | 1984 | ap->ops->sff_dev_select(ap, 0); |
624d5c51 TH |
1985 | |
1986 | return ret; | |
1fdffbce | 1987 | } |
0fe40ff8 | 1988 | EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); |
1fdffbce | 1989 | |
624d5c51 TH |
1990 | static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, |
1991 | unsigned long deadline) | |
2cc432ee | 1992 | { |
624d5c51 | 1993 | struct ata_ioports *ioaddr = &ap->ioaddr; |
2cc432ee | 1994 | |
624d5c51 TH |
1995 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); |
1996 | ||
1997 | /* software reset. causes dev0 to be selected */ | |
1998 | iowrite8(ap->ctl, ioaddr->ctl_addr); | |
1999 | udelay(20); /* FIXME: flush */ | |
2000 | iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); | |
2001 | udelay(20); /* FIXME: flush */ | |
2002 | iowrite8(ap->ctl, ioaddr->ctl_addr); | |
e3e4385f | 2003 | ap->last_ctl = ap->ctl; |
624d5c51 | 2004 | |
705e76be TH |
2005 | /* wait for the port to become ready */ |
2006 | return ata_sff_wait_after_reset(&ap->link, devmask, deadline); | |
2cc432ee JG |
2007 | } |
2008 | ||
6d97dbd7 | 2009 | /** |
9363c382 | 2010 | * ata_sff_softreset - reset host port via ATA SRST |
624d5c51 TH |
2011 | * @link: ATA link to reset |
2012 | * @classes: resulting classes of attached devices | |
2013 | * @deadline: deadline jiffies for the operation | |
6d97dbd7 | 2014 | * |
624d5c51 | 2015 | * Reset host port using ATA SRST. |
6d97dbd7 TH |
2016 | * |
2017 | * LOCKING: | |
624d5c51 TH |
2018 | * Kernel thread context (may sleep) |
2019 | * | |
2020 | * RETURNS: | |
2021 | * 0 on success, -errno otherwise. | |
6d97dbd7 | 2022 | */ |
9363c382 | 2023 | int ata_sff_softreset(struct ata_link *link, unsigned int *classes, |
624d5c51 | 2024 | unsigned long deadline) |
6d97dbd7 | 2025 | { |
624d5c51 TH |
2026 | struct ata_port *ap = link->ap; |
2027 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | |
2028 | unsigned int devmask = 0; | |
2029 | int rc; | |
2030 | u8 err; | |
6d97dbd7 | 2031 | |
624d5c51 | 2032 | DPRINTK("ENTER\n"); |
6d97dbd7 | 2033 | |
624d5c51 TH |
2034 | /* determine if device 0/1 are present */ |
2035 | if (ata_devchk(ap, 0)) | |
2036 | devmask |= (1 << 0); | |
2037 | if (slave_possible && ata_devchk(ap, 1)) | |
2038 | devmask |= (1 << 1); | |
2039 | ||
2040 | /* select device 0 again */ | |
5682ed33 | 2041 | ap->ops->sff_dev_select(ap, 0); |
624d5c51 TH |
2042 | |
2043 | /* issue bus reset */ | |
2044 | DPRINTK("about to softreset, devmask=%x\n", devmask); | |
2045 | rc = ata_bus_softreset(ap, devmask, deadline); | |
2046 | /* if link is occupied, -ENODEV too is an error */ | |
2047 | if (rc && (rc != -ENODEV || sata_scr_valid(link))) { | |
2048 | ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); | |
2049 | return rc; | |
2050 | } | |
0f0a3ad3 | 2051 | |
624d5c51 | 2052 | /* determine by signature whether we have ATA or ATAPI devices */ |
9363c382 | 2053 | classes[0] = ata_sff_dev_classify(&link->device[0], |
624d5c51 TH |
2054 | devmask & (1 << 0), &err); |
2055 | if (slave_possible && err != 0x81) | |
9363c382 | 2056 | classes[1] = ata_sff_dev_classify(&link->device[1], |
624d5c51 TH |
2057 | devmask & (1 << 1), &err); |
2058 | ||
624d5c51 TH |
2059 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); |
2060 | return 0; | |
6d97dbd7 | 2061 | } |
0fe40ff8 | 2062 | EXPORT_SYMBOL_GPL(ata_sff_softreset); |
6d97dbd7 TH |
2063 | |
2064 | /** | |
9363c382 | 2065 | * sata_sff_hardreset - reset host port via SATA phy reset |
624d5c51 TH |
2066 | * @link: link to reset |
2067 | * @class: resulting class of attached device | |
2068 | * @deadline: deadline jiffies for the operation | |
6d97dbd7 | 2069 | * |
624d5c51 TH |
2070 | * SATA phy-reset host port using DET bits of SControl register, |
2071 | * wait for !BSY and classify the attached device. | |
6d97dbd7 TH |
2072 | * |
2073 | * LOCKING: | |
624d5c51 TH |
2074 | * Kernel thread context (may sleep) |
2075 | * | |
2076 | * RETURNS: | |
2077 | * 0 on success, -errno otherwise. | |
6d97dbd7 | 2078 | */ |
9363c382 | 2079 | int sata_sff_hardreset(struct ata_link *link, unsigned int *class, |
624d5c51 | 2080 | unsigned long deadline) |
6d97dbd7 | 2081 | { |
9dadd45b TH |
2082 | struct ata_eh_context *ehc = &link->eh_context; |
2083 | const unsigned long *timing = sata_ehc_deb_timing(ehc); | |
2084 | bool online; | |
624d5c51 TH |
2085 | int rc; |
2086 | ||
9dadd45b TH |
2087 | rc = sata_link_hardreset(link, timing, deadline, &online, |
2088 | ata_sff_check_ready); | |
9dadd45b TH |
2089 | if (online) |
2090 | *class = ata_sff_dev_classify(link->device, 1, NULL); | |
624d5c51 TH |
2091 | |
2092 | DPRINTK("EXIT, class=%u\n", *class); | |
9dadd45b | 2093 | return rc; |
6d97dbd7 | 2094 | } |
0fe40ff8 | 2095 | EXPORT_SYMBOL_GPL(sata_sff_hardreset); |
6d97dbd7 | 2096 | |
203c75b8 TH |
2097 | /** |
2098 | * ata_sff_postreset - SFF postreset callback | |
2099 | * @link: the target SFF ata_link | |
2100 | * @classes: classes of attached devices | |
2101 | * | |
2102 | * This function is invoked after a successful reset. It first | |
2103 | * calls ata_std_postreset() and performs SFF specific postreset | |
2104 | * processing. | |
2105 | * | |
2106 | * LOCKING: | |
2107 | * Kernel thread context (may sleep) | |
2108 | */ | |
2109 | void ata_sff_postreset(struct ata_link *link, unsigned int *classes) | |
2110 | { | |
2111 | struct ata_port *ap = link->ap; | |
2112 | ||
2113 | ata_std_postreset(link, classes); | |
2114 | ||
2115 | /* is double-select really necessary? */ | |
2116 | if (classes[0] != ATA_DEV_NONE) | |
2117 | ap->ops->sff_dev_select(ap, 1); | |
2118 | if (classes[1] != ATA_DEV_NONE) | |
2119 | ap->ops->sff_dev_select(ap, 0); | |
2120 | ||
2121 | /* bail out if no device is present */ | |
2122 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | |
2123 | DPRINTK("EXIT, no device\n"); | |
2124 | return; | |
2125 | } | |
2126 | ||
2127 | /* set up device control */ | |
41dec29b SS |
2128 | if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { |
2129 | ata_sff_set_devctl(ap, ap->ctl); | |
e3e4385f SM |
2130 | ap->last_ctl = ap->ctl; |
2131 | } | |
203c75b8 | 2132 | } |
0fe40ff8 | 2133 | EXPORT_SYMBOL_GPL(ata_sff_postreset); |
203c75b8 | 2134 | |
3d47aa8e AC |
2135 | /** |
2136 | * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers | |
2137 | * @qc: command | |
2138 | * | |
2139 | * Drain the FIFO and device of any stuck data following a command |
3ad2f3fb | 2140 | * that failed to complete. In some cases this is necessary before a |
3d47aa8e AC |
2141 | * reset will recover the device. |
2142 | * | |
2143 | */ | |
2144 | ||
2145 | void ata_sff_drain_fifo(struct ata_queued_cmd *qc) | |
2146 | { | |
2147 | int count; | |
2148 | struct ata_port *ap; | |
2149 | ||
2150 | /* We only need to flush incoming data when a command was running */ | |
2151 | if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) | |
2152 | return; | |
2153 | ||
2154 | ap = qc->ap; | |
2155 | /* Drain up to 64K of data before we give up this recovery method */ | |
2156 | for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) | |
9a8fd68b | 2157 | && count < 65536; count += 2) |
3d47aa8e AC |
2158 | ioread16(ap->ioaddr.data_addr); |
2159 | ||
2160 | /* Can become DEBUG later */ | |
2161 | if (count) | |
2162 | ata_port_printk(ap, KERN_DEBUG, | |
2163 | "drained %d bytes to clear DRQ.\n", count); | |
2164 | ||
2165 | } | |
2166 | EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); | |
2167 | ||
6d97dbd7 | 2168 | /** |
fe06e5f9 | 2169 | * ata_sff_error_handler - Stock error handler for SFF controller |
6d97dbd7 | 2170 | * @ap: port to handle error for |
6d97dbd7 | 2171 | * |
9363c382 | 2172 | * Stock error handler for SFF controller. It can handle both |
6d97dbd7 TH |
2173 | * PATA and SATA controllers. Many controllers should be able to |
2174 | * use this EH as-is or with some added handling before and | |
2175 | * after. | |
2176 | * | |
6d97dbd7 TH |
2177 | * LOCKING: |
2178 | * Kernel thread context (may sleep) | |
2179 | */ | |
9363c382 | 2180 | void ata_sff_error_handler(struct ata_port *ap) |
6d97dbd7 | 2181 | { |
a1efdaba TH |
2182 | ata_reset_fn_t softreset = ap->ops->softreset; |
2183 | ata_reset_fn_t hardreset = ap->ops->hardreset; | |
6d97dbd7 TH |
2184 | struct ata_queued_cmd *qc; |
2185 | unsigned long flags; | |
6d97dbd7 | 2186 | |
9af5c9c9 | 2187 | qc = __ata_qc_from_tag(ap, ap->link.active_tag); |
6d97dbd7 TH |
2188 | if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) |
2189 | qc = NULL; | |
2190 | ||
ba6a1308 | 2191 | spin_lock_irqsave(ap->lock, flags); |
6d97dbd7 | 2192 | |
fe06e5f9 TH |
2193 | /* |
2194 | * We *MUST* do FIFO draining before we issue a reset as | |
2195 | * several devices helpfully clear their internal state and | |
2196 | * will lock solid if we touch the data port post reset. Pass | |
2197 | * qc in case anyone wants to do different PIO/DMA recovery or | |
2198 | * has per-command fixups. |
3d47aa8e | 2199 | */ |
8244cd05 TH |
2200 | if (ap->ops->sff_drain_fifo) |
2201 | ap->ops->sff_drain_fifo(qc); | |
6d97dbd7 | 2202 | |
ba6a1308 | 2203 | spin_unlock_irqrestore(ap->lock, flags); |
6d97dbd7 | 2204 | |
fe06e5f9 | 2205 | /* ignore ata_sff_softreset if ctl isn't accessible */ |
9363c382 | 2206 | if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) |
a1efdaba | 2207 | softreset = NULL; |
fe06e5f9 TH |
2208 | |
2209 | /* ignore built-in hardresets if SCR access is not available */ | |
2210 | if ((hardreset == sata_std_hardreset || | |
2211 | hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) | |
a1efdaba | 2212 | hardreset = NULL; |
6d97dbd7 | 2213 | |
a1efdaba TH |
2214 | ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, |
2215 | ap->ops->postreset); | |
6d97dbd7 | 2216 | } |
0fe40ff8 | 2217 | EXPORT_SYMBOL_GPL(ata_sff_error_handler); |
6d97dbd7 | 2218 | |
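/*
 * Illustrative sketch (an assumption, not from this file): per the note
 * above that many controllers can use this EH "with some added handling
 * before and after", a typical wrapper looks like this.
 * example_quiesce_chip() and example_restart_engine() are hypothetical.
 */
static void example_error_handler(struct ata_port *ap)
{
	/* silence chip-specific interrupt sources before generic
	 * recovery starts poking the port */
	example_quiesce_chip(ap);

	ata_sff_error_handler(ap);

	/* re-arm whatever the quiesce step turned off */
	example_restart_engine(ap);
}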
624d5c51 | 2219 | /** |
9363c382 | 2220 | * ata_sff_std_ports - initialize ioaddr with standard port offsets. |
624d5c51 TH |
2221 | * @ioaddr: IO address structure to be initialized |
2222 | * | |
2223 | * Utility function which initializes data_addr, error_addr, | |
2224 | * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, | |
2225 | * device_addr, status_addr, and command_addr to standard offsets | |
2226 | * relative to cmd_addr. | |
2227 | * | |
2228 | * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. | |
2229 | */ | |
9363c382 | 2230 | void ata_sff_std_ports(struct ata_ioports *ioaddr) |
624d5c51 TH |
2231 | { |
2232 | ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; | |
2233 | ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; | |
2234 | ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; | |
2235 | ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; | |
2236 | ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; | |
2237 | ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; | |
2238 | ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; | |
2239 | ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; | |
2240 | ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; | |
2241 | ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; | |
2242 | } | |
0fe40ff8 | 2243 | EXPORT_SYMBOL_GPL(ata_sff_std_ports); |
624d5c51 | 2244 | |
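/*
 * Illustrative sketch (hypothetical MMIO layout, not from this file): a
 * driver points cmd_addr/ctl_addr at its own mapping and lets
 * ata_sff_std_ports() derive the remaining taskfile addresses.  The
 * 0x00/0x08 offsets are assumptions for the example only.
 */
static void example_setup_port(struct ata_port *ap, void __iomem *base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = base + 0x00;			/* taskfile block */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = base + 0x08;			/* device control */

	/* fill data/error/.../command_addr as cmd_addr + ATA_REG_* */
	ata_sff_std_ports(ioaddr);
}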
1fdffbce | 2245 | #ifdef CONFIG_PCI |
4112e16a | 2246 | |
272f7884 TH |
2247 | static int ata_resources_present(struct pci_dev *pdev, int port) |
2248 | { | |
2249 | int i; | |
2250 | ||
2251 | /* Check the PCI resources for this channel are enabled */ | |
2252 | port = port * 2; | |
0fe40ff8 | 2253 | for (i = 0; i < 2; i++) { |
272f7884 TH |
2254 | if (pci_resource_start(pdev, port + i) == 0 || |
2255 | pci_resource_len(pdev, port + i) == 0) | |
2256 | return 0; | |
2257 | } | |
2258 | return 1; | |
2259 | } | |
2260 | ||
d491b27b | 2261 | /** |
9363c382 | 2262 | * ata_pci_sff_init_host - acquire native PCI ATA resources and init host |
d491b27b | 2263 | * @host: target ATA host |
d491b27b | 2264 | * |
1626aeb8 TH |
2265 | * Acquire native PCI ATA resources for @host and initialize the |
2266 | * first two ports of @host accordingly. Ports marked dummy are | |
2267 | * skipped and allocation failure makes the port dummy. | |
d491b27b | 2268 | * |
d583bc18 TH |
2269 | * Note that native PCI resources are valid even for legacy hosts |
2270 | * as we fix up pdev resources array early in boot, so this | |
2271 | * function can be used for both native and legacy SFF hosts. | |
2272 | * | |
d491b27b TH |
2273 | * LOCKING: |
2274 | * Inherited from calling layer (may sleep). | |
2275 | * | |
2276 | * RETURNS: | |
1626aeb8 TH |
2277 | * 0 if at least one port is initialized, -ENODEV if no port is |
2278 | * available. | |
d491b27b | 2279 | */ |
9363c382 | 2280 | int ata_pci_sff_init_host(struct ata_host *host) |
d491b27b TH |
2281 | { |
2282 | struct device *gdev = host->dev; | |
2283 | struct pci_dev *pdev = to_pci_dev(gdev); | |
1626aeb8 | 2284 | unsigned int mask = 0; |
d491b27b TH |
2285 | int i, rc; |
2286 | ||
d491b27b TH |
2287 | /* request, iomap BARs and init port addresses accordingly */ |
2288 | for (i = 0; i < 2; i++) { | |
2289 | struct ata_port *ap = host->ports[i]; | |
2290 | int base = i * 2; | |
2291 | void __iomem * const *iomap; | |
2292 | ||
1626aeb8 TH |
2293 | if (ata_port_is_dummy(ap)) |
2294 | continue; | |
2295 | ||
2296 | /* Discard disabled ports. Some controllers show | |
2297 | * their unused channels this way. Disabled ports are | |
2298 | * made dummy. | |
2299 | */ | |
2300 | if (!ata_resources_present(pdev, i)) { | |
2301 | ap->ops = &ata_dummy_port_ops; | |
d491b27b | 2302 | continue; |
1626aeb8 | 2303 | } |
d491b27b | 2304 | |
35a10a80 TH |
2305 | rc = pcim_iomap_regions(pdev, 0x3 << base, |
2306 | dev_driver_string(gdev)); | |
d491b27b | 2307 | if (rc) { |
1626aeb8 TH |
2308 | dev_printk(KERN_WARNING, gdev, |
2309 | "failed to request/iomap BARs for port %d " | |
2310 | "(errno=%d)\n", i, rc); | |
d491b27b TH |
2311 | if (rc == -EBUSY) |
2312 | pcim_pin_device(pdev); | |
1626aeb8 TH |
2313 | ap->ops = &ata_dummy_port_ops; |
2314 | continue; | |
d491b27b TH |
2315 | } |
2316 | host->iomap = iomap = pcim_iomap_table(pdev); | |
2317 | ||
2318 | ap->ioaddr.cmd_addr = iomap[base]; | |
2319 | ap->ioaddr.altstatus_addr = | |
2320 | ap->ioaddr.ctl_addr = (void __iomem *) | |
2321 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); | |
9363c382 | 2322 | ata_sff_std_ports(&ap->ioaddr); |
1626aeb8 | 2323 | |
cbcdd875 TH |
2324 | ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", |
2325 | (unsigned long long)pci_resource_start(pdev, base), | |
2326 | (unsigned long long)pci_resource_start(pdev, base + 1)); | |
2327 | ||
1626aeb8 TH |
2328 | mask |= 1 << i; |
2329 | } | |
2330 | ||
2331 | if (!mask) { | |
2332 | dev_printk(KERN_ERR, gdev, "no available native port\n"); | |
2333 | return -ENODEV; | |
d491b27b TH |
2334 | } |
2335 | ||
2336 | return 0; | |
2337 | } | |
0fe40ff8 | 2338 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); |
d491b27b | 2339 | |
21b0ad4f | 2340 | /** |
1c5afdf7 | 2341 | * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host |
21b0ad4f | 2342 | * @pdev: target PCI device |
1626aeb8 | 2343 | * @ppi: array of port_info, must be enough for two ports |
21b0ad4f TH |
2344 | * @r_host: out argument for the initialized ATA host |
2345 | * | |
1c5afdf7 TH |
2346 | * Helper to allocate PIO-only SFF ATA host for @pdev, acquire |
2347 | * all PCI resources and initialize it accordingly in one go. | |
21b0ad4f TH |
2348 | * |
2349 | * LOCKING: | |
2350 | * Inherited from calling layer (may sleep). | |
2351 | * | |
2352 | * RETURNS: | |
2353 | * 0 on success, -errno otherwise. | |
2354 | */ | |
9363c382 | 2355 | int ata_pci_sff_prepare_host(struct pci_dev *pdev, |
0fe40ff8 | 2356 | const struct ata_port_info * const *ppi, |
d583bc18 | 2357 | struct ata_host **r_host) |
21b0ad4f TH |
2358 | { |
2359 | struct ata_host *host; | |
21b0ad4f TH |
2360 | int rc; |
2361 | ||
2362 | if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) | |
2363 | return -ENOMEM; | |
2364 | ||
2365 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); | |
2366 | if (!host) { | |
2367 | dev_printk(KERN_ERR, &pdev->dev, | |
2368 | "failed to allocate ATA host\n"); | |
2369 | rc = -ENOMEM; | |
2370 | goto err_out; | |
2371 | } | |
2372 | ||
9363c382 | 2373 | rc = ata_pci_sff_init_host(host); |
21b0ad4f TH |
2374 | if (rc) |
2375 | goto err_out; | |
2376 | ||
21b0ad4f TH |
2377 | devres_remove_group(&pdev->dev, NULL); |
2378 | *r_host = host; | |
2379 | return 0; | |
2380 | ||
0fe40ff8 | 2381 | err_out: |
21b0ad4f TH |
2382 | devres_release_group(&pdev->dev, NULL); |
2383 | return rc; | |
2384 | } | |
0fe40ff8 | 2385 | EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); |
21b0ad4f | 2386 | |
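/*
 * Illustrative sketch (hypothetical driver): when chip fixups must happen
 * between host allocation and IRQ registration, the prepare/activate pair
 * is used instead of the all-in-one ata_pci_sff_init_one() below.
 * pata_foo_port_info, pata_foo_sht and foo_apply_quirks() are assumed to
 * exist elsewhere in that driver.
 */
static int pata_foo_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &pata_foo_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* allocate the two-port host and iomap the taskfile BARs */
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* chip-specific fixups before any IRQ can fire */
	foo_apply_quirks(host);

	/* request IRQ(s) and register the host with SCSI */
	return ata_pci_sff_activate_host(host, ata_sff_interrupt,
					 &pata_foo_sht);
}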
4e6b79fa | 2387 | /** |
9363c382 | 2388 | * ata_pci_sff_activate_host - start SFF host, request IRQ and register it |
4e6b79fa TH |
2389 | * @host: target SFF ATA host |
2390 | * @irq_handler: irq_handler used when requesting IRQ(s) | |
2391 | * @sht: scsi_host_template to use when registering the host | |
2392 | * | |
2393 | * This is the counterpart of ata_host_activate() for SFF ATA | |
2394 | * hosts. This separate helper is necessary because SFF hosts | |
2395 | * use two separate interrupts in legacy mode. | |
2396 | * | |
2397 | * LOCKING: | |
2398 | * Inherited from calling layer (may sleep). | |
2399 | * | |
2400 | * RETURNS: | |
2401 | * 0 on success, -errno otherwise. | |
2402 | */ | |
9363c382 | 2403 | int ata_pci_sff_activate_host(struct ata_host *host, |
4e6b79fa TH |
2404 | irq_handler_t irq_handler, |
2405 | struct scsi_host_template *sht) | |
2406 | { | |
2407 | struct device *dev = host->dev; | |
2408 | struct pci_dev *pdev = to_pci_dev(dev); | |
2409 | const char *drv_name = dev_driver_string(host->dev); | |
2410 | int legacy_mode = 0, rc; | |
2411 | ||
2412 | rc = ata_host_start(host); | |
2413 | if (rc) | |
2414 | return rc; | |
2415 | ||
2416 | if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { | |
2417 | u8 tmp8, mask; | |
2418 | ||
2419 | /* TODO: What if one channel is in native mode ... */ | |
2420 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); | |
2421 | mask = (1 << 2) | (1 << 0); | |
2422 | if ((tmp8 & mask) != mask) | |
2423 | legacy_mode = 1; | |
2424 | #if defined(CONFIG_NO_ATA_LEGACY) | |
2425 | /* Some platforms with PCI limits cannot address compat | |
2426 | port space. In that case we punt if their firmware has | |
2427 | left a device in compatibility mode */ | |
2428 | if (legacy_mode) { | |
2429 | printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); | |
2430 | return -EOPNOTSUPP; | |
2431 | } | |
2432 | #endif | |
2433 | } | |
2434 | ||
2435 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | |
2436 | return -ENOMEM; | |
2437 | ||
2438 | if (!legacy_mode && pdev->irq) { | |
2439 | rc = devm_request_irq(dev, pdev->irq, irq_handler, | |
2440 | IRQF_SHARED, drv_name, host); | |
2441 | if (rc) | |
2442 | goto out; | |
2443 | ||
2444 | ata_port_desc(host->ports[0], "irq %d", pdev->irq); | |
2445 | ata_port_desc(host->ports[1], "irq %d", pdev->irq); | |
2446 | } else if (legacy_mode) { | |
2447 | if (!ata_port_is_dummy(host->ports[0])) { | |
2448 | rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), | |
2449 | irq_handler, IRQF_SHARED, | |
2450 | drv_name, host); | |
2451 | if (rc) | |
2452 | goto out; | |
2453 | ||
2454 | ata_port_desc(host->ports[0], "irq %d", | |
2455 | ATA_PRIMARY_IRQ(pdev)); | |
2456 | } | |
2457 | ||
2458 | if (!ata_port_is_dummy(host->ports[1])) { | |
2459 | rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), | |
2460 | irq_handler, IRQF_SHARED, | |
2461 | drv_name, host); | |
2462 | if (rc) | |
2463 | goto out; | |
2464 | ||
2465 | ata_port_desc(host->ports[1], "irq %d", | |
2466 | ATA_SECONDARY_IRQ(pdev)); | |
2467 | } | |
2468 | } | |
2469 | ||
2470 | rc = ata_host_register(host, sht); | |
0fe40ff8 | 2471 | out: |
4e6b79fa TH |
2472 | if (rc == 0) |
2473 | devres_remove_group(dev, NULL); | |
2474 | else | |
2475 | devres_release_group(dev, NULL); | |
2476 | ||
2477 | return rc; | |
2478 | } | |
0fe40ff8 | 2479 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); |
4e6b79fa | 2480 | |
1c5afdf7 TH |
2481 | static const struct ata_port_info *ata_sff_find_valid_pi( |
2482 | const struct ata_port_info * const *ppi) | |
2483 | { | |
2484 | int i; | |
2485 | ||
2486 | /* look up the first valid port_info */ | |
2487 | for (i = 0; i < 2 && ppi[i]; i++) | |
2488 | if (ppi[i]->port_ops != &ata_dummy_port_ops) | |
2489 | return ppi[i]; | |
2490 | ||
2491 | return NULL; | |
2492 | } | |
2493 | ||
1fdffbce | 2494 | /** |
1c5afdf7 | 2495 | * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller |
1fdffbce | 2496 | * @pdev: Controller to be initialized |
1626aeb8 | 2497 | * @ppi: array of port_info, must be enough for two ports |
1bd5b715 | 2498 | * @sht: scsi_host_template to use when registering the host |
887125e3 | 2499 | * @host_priv: host private_data |
16ea0fc9 | 2500 | * @hflag: host flags |
1fdffbce JG |
2501 | * |
2502 | * This is a helper function which can be called from a driver's | |
2503 | * xxx_init_one() probe function if the hardware uses traditional | |
1c5afdf7 | 2504 | * IDE taskfile registers and is PIO only. |
1fdffbce | 2505 | * |
2ec7df04 AC |
2506 | * ASSUMPTION: |
2507 | * Nobody makes a single channel controller that appears solely as | |
2508 | * the secondary legacy port on PCI. | |
2509 | * | |
1fdffbce JG |
2510 | * LOCKING: |
2511 | * Inherited from PCI layer (may sleep). | |
2512 | * | |
2513 | * RETURNS: | |
2514 | * Zero on success, negative errno-based value on error. |
2515 | */ | |
9363c382 | 2516 | int ata_pci_sff_init_one(struct pci_dev *pdev, |
16ea0fc9 AC |
2517 | const struct ata_port_info * const *ppi, |
2518 | struct scsi_host_template *sht, void *host_priv, int hflag) | |
1fdffbce | 2519 | { |
f0d36efd | 2520 | struct device *dev = &pdev->dev; |
1c5afdf7 | 2521 | const struct ata_port_info *pi; |
0f834de3 | 2522 | struct ata_host *host = NULL; |
1c5afdf7 | 2523 | int rc; |
1fdffbce JG |
2524 | |
2525 | DPRINTK("ENTER\n"); | |
2526 | ||
1c5afdf7 | 2527 | pi = ata_sff_find_valid_pi(ppi); |
1626aeb8 TH |
2528 | if (!pi) { |
2529 | dev_printk(KERN_ERR, &pdev->dev, | |
2530 | "no valid port_info specified\n"); | |
2531 | return -EINVAL; | |
2532 | } | |
c791c306 | 2533 | |
1626aeb8 TH |
2534 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) |
2535 | return -ENOMEM; | |
1fdffbce | 2536 | |
f0d36efd | 2537 | rc = pcim_enable_device(pdev); |
1fdffbce | 2538 | if (rc) |
4e6b79fa | 2539 | goto out; |
1fdffbce | 2540 | |
4e6b79fa | 2541 | /* prepare and activate SFF host */ |
9363c382 | 2542 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
d583bc18 | 2543 | if (rc) |
4e6b79fa | 2544 | goto out; |
887125e3 | 2545 | host->private_data = host_priv; |
16ea0fc9 | 2546 | host->flags |= hflag; |
d491b27b | 2547 | |
1c5afdf7 | 2548 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
0fe40ff8 | 2549 | out: |
4e6b79fa TH |
2550 | if (rc == 0) |
2551 | devres_remove_group(&pdev->dev, NULL); | |
2552 | else | |
2553 | devres_release_group(&pdev->dev, NULL); | |
d491b27b | 2554 | |
1fdffbce JG |
2555 | return rc; |
2556 | } | |
0fe40ff8 | 2557 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); |
1fdffbce JG |
2558 | |
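/*
 * Illustrative sketch (hypothetical driver, assumes <linux/pci.h> and
 * <linux/libata.h>): the simplest PIO-only probe is a single call to
 * ata_pci_sff_init_one().  The pata_example_* names are assumptions made
 * for the example.
 */
static struct scsi_host_template pata_example_sht = {
	ATA_PIO_SHT("pata_example"),
};

static struct ata_port_operations pata_example_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};

static const struct ata_port_info pata_example_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.port_ops	= &pata_example_port_ops,
};

static int pata_example_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &pata_example_port_info, NULL };

	/* enable the device, iomap BARs, request IRQ(s) and register
	 * both ports in one go */
	return ata_pci_sff_init_one(pdev, ppi, &pata_example_sht, NULL, 0);
}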
2559 | #endif /* CONFIG_PCI */ | |
9f2f7210 | 2560 | |
9a7780c9 TH |
2561 | /* |
2562 | * BMDMA support | |
2563 | */ | |
2564 | ||
2565 | #ifdef CONFIG_ATA_BMDMA | |
2566 | ||
9f2f7210 TH |
2567 | const struct ata_port_operations ata_bmdma_port_ops = { |
2568 | .inherits = &ata_sff_port_ops, | |
2569 | ||
fe06e5f9 TH |
2570 | .error_handler = ata_bmdma_error_handler, |
2571 | .post_internal_cmd = ata_bmdma_post_internal_cmd, | |
2572 | ||
f47451c4 | 2573 | .qc_prep = ata_bmdma_qc_prep, |
360ff783 | 2574 | .qc_issue = ata_bmdma_qc_issue, |
f47451c4 | 2575 | |
37f65b8b | 2576 | .sff_irq_clear = ata_bmdma_irq_clear, |
9f2f7210 TH |
2577 | .bmdma_setup = ata_bmdma_setup, |
2578 | .bmdma_start = ata_bmdma_start, | |
2579 | .bmdma_stop = ata_bmdma_stop, | |
2580 | .bmdma_status = ata_bmdma_status, | |
c7087652 TH |
2581 | |
2582 | .port_start = ata_bmdma_port_start, | |
9f2f7210 TH |
2583 | }; |
2584 | EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); | |
2585 | ||
2586 | const struct ata_port_operations ata_bmdma32_port_ops = { | |
2587 | .inherits = &ata_bmdma_port_ops, | |
2588 | ||
2589 | .sff_data_xfer = ata_sff_data_xfer32, | |
c7087652 | 2590 | .port_start = ata_bmdma_port_start32, |
9f2f7210 TH |
2591 | }; |
2592 | EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); | |
2593 | ||
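/*
 * Illustrative sketch (hypothetical driver): low-level BMDMA drivers
 * normally inherit everything above and override only the chip-specific
 * hooks.  example_set_piomode()/example_set_dmamode() are assumed helpers
 * that program the chip's timing registers.
 */
static struct ata_port_operations pata_example_bmdma_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
	.set_piomode	= example_set_piomode,
	.set_dmamode	= example_set_dmamode,
};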
f47451c4 TH |
2594 | /** |
2595 | * ata_bmdma_fill_sg - Fill PCI IDE PRD table | |
2596 | * @qc: Metadata associated with taskfile to be transferred | |
2597 | * | |
2598 | * Fill PCI IDE PRD (scatter-gather) table with segments | |
2599 | * associated with the current disk command. | |
2600 | * | |
2601 | * LOCKING: | |
2602 | * spin_lock_irqsave(host lock) | |
2603 | * | |
2604 | */ | |
2605 | static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) | |
2606 | { | |
2607 | struct ata_port *ap = qc->ap; | |
f60d7011 | 2608 | struct ata_bmdma_prd *prd = ap->bmdma_prd; |
f47451c4 TH |
2609 | struct scatterlist *sg; |
2610 | unsigned int si, pi; | |
2611 | ||
2612 | pi = 0; | |
2613 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | |
2614 | u32 addr, offset; | |
2615 | u32 sg_len, len; | |
2616 | ||
2617 | /* determine if physical DMA addr spans 64K boundary. | |
2618 | * Note h/w doesn't support 64-bit, so we unconditionally | |
2619 | * truncate dma_addr_t to u32. | |
2620 | */ | |
2621 | addr = (u32) sg_dma_address(sg); | |
2622 | sg_len = sg_dma_len(sg); | |
2623 | ||
2624 | while (sg_len) { | |
2625 | offset = addr & 0xffff; | |
2626 | len = sg_len; | |
2627 | if ((offset + sg_len) > 0x10000) | |
2628 | len = 0x10000 - offset; | |
2629 | ||
f60d7011 TH |
2630 | prd[pi].addr = cpu_to_le32(addr); |
2631 | prd[pi].flags_len = cpu_to_le32(len & 0xffff); | |
f47451c4 TH |
2632 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); |
2633 | ||
2634 | pi++; | |
2635 | sg_len -= len; | |
2636 | addr += len; | |
2637 | } | |
2638 | } | |
2639 | ||
f60d7011 | 2640 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
f47451c4 TH |
2641 | } |
2642 | ||
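/*
 * Worked example of the 64K split above (illustrative numbers only): a
 * single 12KiB S/G segment at bus address 0x0000F000 crosses a 64K
 * boundary, so ata_bmdma_fill_sg() emits two PRD entries:
 *
 *   PRD[0]: addr 0x0000F000, len 0x1000   (up to the 0x10000 boundary)
 *   PRD[1]: addr 0x00010000, len 0x2000   (the remainder)
 *
 * and the last entry written gets ATA_PRD_EOT set.
 */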
2643 | /** | |
2644 | * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table | |
2645 | * @qc: Metadata associated with taskfile to be transferred | |
2646 | * | |
2647 | * Fill PCI IDE PRD (scatter-gather) table with segments | |
2648 | * associated with the current disk command. Perform the fill | |
2649 | * so that we avoid writing any 64K-length records for |
2650 | * controllers that don't follow the spec. | |
2651 | * | |
2652 | * LOCKING: | |
2653 | * spin_lock_irqsave(host lock) | |
2654 | * | |
2655 | */ | |
2656 | static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) | |
2657 | { | |
2658 | struct ata_port *ap = qc->ap; | |
f60d7011 | 2659 | struct ata_bmdma_prd *prd = ap->bmdma_prd; |
f47451c4 TH |
2660 | struct scatterlist *sg; |
2661 | unsigned int si, pi; | |
2662 | ||
2663 | pi = 0; | |
2664 | for_each_sg(qc->sg, sg, qc->n_elem, si) { | |
2665 | u32 addr, offset; | |
2666 | u32 sg_len, len, blen; | |
2667 | ||
2668 | /* determine if physical DMA addr spans 64K boundary. | |
2669 | * Note h/w doesn't support 64-bit, so we unconditionally | |
2670 | * truncate dma_addr_t to u32. | |
2671 | */ | |
2672 | addr = (u32) sg_dma_address(sg); | |
2673 | sg_len = sg_dma_len(sg); | |
2674 | ||
2675 | while (sg_len) { | |
2676 | offset = addr & 0xffff; | |
2677 | len = sg_len; | |
2678 | if ((offset + sg_len) > 0x10000) | |
2679 | len = 0x10000 - offset; | |
2680 | ||
2681 | blen = len & 0xffff; | |
f60d7011 | 2682 | prd[pi].addr = cpu_to_le32(addr); |
f47451c4 TH |
2683 | if (blen == 0) { |
2684 | /* Some PATA chipsets like the CS5530 can't | |
2685 | cope with 0x0000 meaning 64K as the spec | |
2686 | says */ | |
f60d7011 | 2687 | prd[pi].flags_len = cpu_to_le32(0x8000); |
f47451c4 | 2688 | blen = 0x8000; |
f60d7011 | 2689 | prd[++pi].addr = cpu_to_le32(addr + 0x8000); |
f47451c4 | 2690 | } |
f60d7011 | 2691 | prd[pi].flags_len = cpu_to_le32(blen); |
f47451c4 TH |
2692 | VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); |
2693 | ||
2694 | pi++; | |
2695 | sg_len -= len; | |
2696 | addr += len; | |
2697 | } | |
2698 | } | |
2699 | ||
f60d7011 | 2700 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
f47451c4 TH |
2701 | } |
2702 | ||
2703 | /** | |
2704 | * ata_bmdma_qc_prep - Prepare taskfile for submission | |
2705 | * @qc: Metadata associated with taskfile to be prepared | |
2706 | * | |
2707 | * Prepare ATA taskfile for submission. | |
2708 | * | |
2709 | * LOCKING: | |
2710 | * spin_lock_irqsave(host lock) | |
2711 | */ | |
2712 | void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) | |
2713 | { | |
2714 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | |
2715 | return; | |
2716 | ||
2717 | ata_bmdma_fill_sg(qc); | |
2718 | } | |
2719 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); | |
2720 | ||
2721 | /** | |
2722 | * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission | |
2723 | * @qc: Metadata associated with taskfile to be prepared | |
2724 | * | |
2725 | * Prepare ATA taskfile for submission. | |
2726 | * | |
2727 | * LOCKING: | |
2728 | * spin_lock_irqsave(host lock) | |
2729 | */ | |
2730 | void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) | |
2731 | { | |
2732 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) | |
2733 | return; | |
2734 | ||
2735 | ata_bmdma_fill_sg_dumb(qc); | |
2736 | } | |
2737 | EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); | |
2738 | ||
360ff783 TH |
2739 | /** |
2740 | * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller | |
2741 | * @qc: command to issue to device | |
2742 | * | |
2743 | * This function issues a PIO, NODATA or DMA command to a | |
2744 | * SFF/BMDMA controller. PIO and NODATA are handled by | |
2745 | * ata_sff_qc_issue(). | |
2746 | * | |
2747 | * LOCKING: | |
2748 | * spin_lock_irqsave(host lock) | |
2749 | * | |
2750 | * RETURNS: | |
2751 | * Zero on success, AC_ERR_* mask on failure | |
2752 | */ | |
2753 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) | |
2754 | { | |
2755 | struct ata_port *ap = qc->ap; | |
ea3c6450 | 2756 | struct ata_link *link = qc->dev->link; |
360ff783 | 2757 | |
360ff783 TH |
2758 | /* defer PIO handling to sff_qc_issue */ |
2759 | if (!ata_is_dma(qc->tf.protocol)) | |
2760 | return ata_sff_qc_issue(qc); | |
2761 | ||
2762 | /* select the device */ | |
2763 | ata_dev_select(ap, qc->dev->devno, 1, 0); | |
2764 | ||
2765 | /* start the command */ | |
2766 | switch (qc->tf.protocol) { | |
2767 | case ATA_PROT_DMA: | |
2768 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); | |
2769 | ||
2770 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | |
2771 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | |
2772 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | |
2773 | ap->hsm_task_state = HSM_ST_LAST; | |
2774 | break; | |
2775 | ||
2776 | case ATAPI_PROT_DMA: | |
2777 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); | |
2778 | ||
2779 | ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ | |
2780 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | |
2781 | ap->hsm_task_state = HSM_ST_FIRST; | |
2782 | ||
2783 | /* send cdb by polling if no cdb interrupt */ | |
2784 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) | |
ea3c6450 | 2785 | ata_sff_queue_pio_task(link, 0); |
360ff783 TH |
2786 | break; |
2787 | ||
2788 | default: | |
2789 | WARN_ON(1); | |
2790 | return AC_ERR_SYSTEM; | |
2791 | } | |
2792 | ||
2793 | return 0; | |
2794 | } | |
2795 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); | |
2796 | ||
c3b28894 TH |
2797 | /** |
2798 | * ata_bmdma_port_intr - Handle BMDMA port interrupt | |
2799 | * @ap: Port on which interrupt arrived (possibly...) | |
2800 | * @qc: Taskfile currently active in engine | |
2801 | * | |
2802 | * Handle port interrupt for given queued command. | |
2803 | * | |
2804 | * LOCKING: | |
2805 | * spin_lock_irqsave(host lock) | |
2806 | * | |
2807 | * RETURNS: | |
2808 | * One if interrupt was handled, zero if not (shared irq). | |
2809 | */ | |
2810 | unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | |
2811 | { | |
2812 | struct ata_eh_info *ehi = &ap->link.eh_info; | |
2813 | u8 host_stat = 0; | |
2814 | bool bmdma_stopped = false; | |
2815 | unsigned int handled; | |
2816 | ||
2817 | if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { | |
2818 | /* check status of DMA engine */ | |
2819 | host_stat = ap->ops->bmdma_status(ap); | |
2820 | VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); | |
2821 | ||
2822 | /* if it's not our irq... */ | |
2823 | if (!(host_stat & ATA_DMA_INTR)) | |
2824 | return ata_sff_idle_irq(ap); | |
2825 | ||
2826 | /* before we do anything else, clear DMA-Start bit */ | |
2827 | ap->ops->bmdma_stop(qc); | |
2828 | bmdma_stopped = true; | |
2829 | ||
2830 | if (unlikely(host_stat & ATA_DMA_ERR)) { | |
2831 | /* error when transferring data to/from memory */ |
2832 | qc->err_mask |= AC_ERR_HOST_BUS; | |
2833 | ap->hsm_task_state = HSM_ST_ERR; | |
2834 | } | |
2835 | } | |
2836 | ||
2837 | handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); | |
2838 | ||
2839 | if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) | |
2840 | ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); | |
2841 | ||
2842 | return handled; | |
2843 | } | |
2844 | EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); | |
2845 | ||
2846 | /** | |
2847 | * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler | |
2848 | * @irq: irq line (unused) | |
2849 | * @dev_instance: pointer to our ata_host information structure | |
2850 | * | |
2851 | * Default interrupt handler for PCI IDE devices. Calls | |
2852 | * ata_bmdma_port_intr() for each port that is not disabled. | |
2853 | * | |
2854 | * LOCKING: | |
2855 | * Obtains host lock during operation. | |
2856 | * | |
2857 | * RETURNS: | |
2858 | * IRQ_NONE or IRQ_HANDLED. | |
2859 | */ | |
2860 | irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) | |
2861 | { | |
2862 | return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); | |
2863 | } | |
2864 | EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); | |
2865 | ||
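/*
 * Illustrative sketch (hypothetical): a BMDMA-capable PCI driver that
 * prepares its host by hand passes this handler, rather than
 * ata_sff_interrupt(), to ata_pci_sff_activate_host().
 */
static int example_bmdma_activate(struct ata_host *host,
				  struct scsi_host_template *sht)
{
	/* requests the native or legacy IRQ(s) and registers the host;
	 * ata_bmdma_interrupt() then checks BMDMA status before running
	 * the SFF HSM for each interrupt */
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}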
fe06e5f9 TH |
2866 | /** |
2867 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller | |
2868 | * @ap: port to handle error for | |
2869 | * | |
2870 | * Stock error handler for BMDMA controller. It can handle both | |
2871 | * PATA and SATA controllers. Most BMDMA controllers should be | |
2872 | * able to use this EH as-is or with some added handling before | |
2873 | * and after. | |
2874 | * | |
2875 | * LOCKING: | |
2876 | * Kernel thread context (may sleep) | |
2877 | */ | |
2878 | void ata_bmdma_error_handler(struct ata_port *ap) | |
2879 | { | |
2880 | struct ata_queued_cmd *qc; | |
2881 | unsigned long flags; | |
2882 | bool thaw = false; | |
2883 | ||
2884 | qc = __ata_qc_from_tag(ap, ap->link.active_tag); | |
2885 | if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) | |
2886 | qc = NULL; | |
2887 | ||
2888 | /* reset PIO HSM and stop DMA engine */ | |
2889 | spin_lock_irqsave(ap->lock, flags); | |
2890 | ||
2891 | if (qc && ata_is_dma(qc->tf.protocol)) { | |
2892 | u8 host_stat; | |
2893 | ||
2894 | host_stat = ap->ops->bmdma_status(ap); | |
2895 | ||
2896 | /* BMDMA controllers indicate host bus error by | |
2897 | * setting DMA_ERR bit and timing out. As it wasn't | |
2898 | * really a timeout event, adjust error mask and | |
2899 | * cancel frozen state. | |
2900 | */ | |
2901 | if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { | |
2902 | qc->err_mask = AC_ERR_HOST_BUS; | |
2903 | thaw = true; | |
2904 | } | |
2905 | ||
2906 | ap->ops->bmdma_stop(qc); | |
2907 | ||
2908 | /* if we're gonna thaw, make sure IRQ is clear */ | |
2909 | if (thaw) { | |
2910 | ap->ops->sff_check_status(ap); | |
37f65b8b TH |
2911 | if (ap->ops->sff_irq_clear) |
2912 | ap->ops->sff_irq_clear(ap); | |
fe06e5f9 TH |
2913 | } |
2914 | } | |
2915 | ||
2916 | spin_unlock_irqrestore(ap->lock, flags); | |
2917 | ||
2918 | if (thaw) | |
2919 | ata_eh_thaw_port(ap); | |
2920 | ||
2921 | ata_sff_error_handler(ap); | |
2922 | } | |
2923 | EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); | |
2924 | ||
2925 | /** | |
2926 | * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA | |
2927 | * @qc: internal command to clean up | |
2928 | * | |
2929 | * LOCKING: | |
2930 | * Kernel thread context (may sleep) | |
2931 | */ | |
2932 | void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) | |
2933 | { | |
2934 | struct ata_port *ap = qc->ap; | |
2935 | unsigned long flags; | |
2936 | ||
2937 | if (ata_is_dma(qc->tf.protocol)) { | |
2938 | spin_lock_irqsave(ap->lock, flags); | |
2939 | ap->ops->bmdma_stop(qc); | |
2940 | spin_unlock_irqrestore(ap->lock, flags); | |
2941 | } | |
2942 | } | |
2943 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); | |
2944 | ||
37f65b8b TH |
2945 | /** |
2946 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. | |
2947 | * @ap: Port associated with this ATA transaction. | |
2948 | * | |
2949 | * Clear interrupt and error flags in DMA status register. | |
2950 | * | |
2951 | * May be used as the irq_clear() entry in ata_port_operations. | |
2952 | * | |
2953 | * LOCKING: | |
2954 | * spin_lock_irqsave(host lock) | |
2955 | */ | |
2956 | void ata_bmdma_irq_clear(struct ata_port *ap) | |
2957 | { | |
2958 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | |
2959 | ||
2960 | if (!mmio) | |
2961 | return; | |
2962 | ||
2963 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); | |
2964 | } | |
2965 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); | |
2966 | ||
9f2f7210 TH |
2967 | /** |
2968 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction | |
2969 | * @qc: Info associated with this ATA transaction. | |
2970 | * | |
2971 | * LOCKING: | |
2972 | * spin_lock_irqsave(host lock) | |
2973 | */ | |
2974 | void ata_bmdma_setup(struct ata_queued_cmd *qc) | |
2975 | { | |
2976 | struct ata_port *ap = qc->ap; | |
2977 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | |
2978 | u8 dmactl; | |
2979 | ||
2980 | /* load PRD table addr. */ | |
2981 | mb(); /* make sure PRD table writes are visible to controller */ | |
f60d7011 | 2982 | iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); |
9f2f7210 TH |
2983 | |
2984 | /* specify data direction, triple-check start bit is clear */ | |
2985 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | |
2986 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | |
2987 | if (!rw) | |
2988 | dmactl |= ATA_DMA_WR; | |
2989 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | |
2990 | ||
2991 | /* issue r/w command */ | |
2992 | ap->ops->sff_exec_command(ap, &qc->tf); | |
2993 | } | |
2994 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); | |
2995 | ||
2996 | /** | |
2997 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | |
2998 | * @qc: Info associated with this ATA transaction. | |
2999 | * | |
3000 | * LOCKING: | |
3001 | * spin_lock_irqsave(host lock) | |
3002 | */ | |
3003 | void ata_bmdma_start(struct ata_queued_cmd *qc) | |
3004 | { | |
3005 | struct ata_port *ap = qc->ap; | |
3006 | u8 dmactl; | |
3007 | ||
3008 | /* start host DMA transaction */ | |
3009 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | |
3010 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); | |
3011 | ||
3012 | /* Strictly, one may wish to issue an ioread8() here, to | |
3013 | * flush the mmio write. However, control also passes | |
3014 | * to the hardware at this point, and it will interrupt | |
3015 | * us when we are to resume control. So, in effect, | |
3016 | * we don't care when the mmio write flushes. | |
3017 | * Further, a read of the DMA status register _immediately_ | |
3018 | * following the write may not be what certain flaky hardware | |
3019 | * expects, so I think it is best not to add a readb() |
3020 | * without first checking all the MMIO ATA cards/mobos. |
3021 | * Or maybe I'm just being paranoid. | |
3022 | * | |
3023 | * FIXME: The posting of this write means I/O starts are | |
3024 | * unnecessarily delayed for MMIO. |
3025 | */ | |
3026 | } | |
3027 | EXPORT_SYMBOL_GPL(ata_bmdma_start); | |
3028 | ||
3029 | /** | |
3030 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | |
3031 | * @qc: Command we are ending DMA for | |
3032 | * | |
3033 | * Clears the ATA_DMA_START flag in the dma control register | |
3034 | * | |
3035 | * May be used as the bmdma_stop() entry in ata_port_operations. | |
3036 | * | |
3037 | * LOCKING: | |
3038 | * spin_lock_irqsave(host lock) | |
3039 | */ | |
3040 | void ata_bmdma_stop(struct ata_queued_cmd *qc) | |
3041 | { | |
3042 | struct ata_port *ap = qc->ap; | |
3043 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | |
3044 | ||
3045 | /* clear start/stop bit */ | |
3046 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, | |
3047 | mmio + ATA_DMA_CMD); | |
3048 | ||
3049 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | |
3050 | ata_sff_dma_pause(ap); | |
3051 | } | |
3052 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); | |
3053 | ||
3054 | /** | |
3055 | * ata_bmdma_status - Read PCI IDE BMDMA status | |
3056 | * @ap: Port associated with this ATA transaction. | |
3057 | * | |
3058 | * Read and return BMDMA status register. | |
3059 | * | |
3060 | * May be used as the bmdma_status() entry in ata_port_operations. | |
3061 | * | |
3062 | * LOCKING: | |
3063 | * spin_lock_irqsave(host lock) | |
3064 | */ | |
3065 | u8 ata_bmdma_status(struct ata_port *ap) | |
3066 | { | |
3067 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); | |
3068 | } | |
3069 | EXPORT_SYMBOL_GPL(ata_bmdma_status); | |
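/*
 * Example (illustrative sketch): the helpers above are normally reached
 * through a driver's ata_port_operations, most simply by inheriting
 * ata_bmdma_port_ops, which already points at them.  Spelled out
 * explicitly for a hypothetical "foo" driver it would look roughly like
 * this:
 */
static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_sff_port_ops,

	.sff_irq_clear	= ata_bmdma_irq_clear,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
};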
3070 | ||
c7087652 TH |
3071 | |
3072 | /** | |
3073 | * ata_bmdma_port_start - Set port up for bmdma. | |
3074 | * @ap: Port to initialize | |
3075 | * | |
3076 | * Called just after data structures for each port are | |
3077 | * initialized. Allocates space for PRD table. | |
3078 | * | |
3079 | * May be used as the port_start() entry in ata_port_operations. | |
3080 | * | |
3081 | * LOCKING: | |
3082 | * Inherited from caller. | |
3083 | */ | |
3084 | int ata_bmdma_port_start(struct ata_port *ap) | |
3085 | { | |
3086 | if (ap->mwdma_mask || ap->udma_mask) { | |
f60d7011 TH |
3087 | ap->bmdma_prd = |
3088 | dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, | |
3089 | &ap->bmdma_prd_dma, GFP_KERNEL); | |
3090 | if (!ap->bmdma_prd) | |
c7087652 TH |
3091 | return -ENOMEM; |
3092 | } | |
3093 | ||
3094 | return 0; | |
3095 | } | |
3096 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start); | |
3097 | ||
3098 | /** | |
3099 | * ata_bmdma_port_start32 - Set port up for dma. | |
3100 | * @ap: Port to initialize | |
3101 | * | |
3102 | * Called just after data structures for each port are | |
3103 | * initialized. Enables 32bit PIO and allocates space for PRD | |
3104 | * table. | |
3105 | * | |
3106 | * May be used as the port_start() entry in ata_port_operations for | |
3107 | * devices that are capable of 32bit PIO. | |
3108 | * | |
3109 | * LOCKING: | |
3110 | * Inherited from caller. | |
3111 | */ | |
3112 | int ata_bmdma_port_start32(struct ata_port *ap) | |
3113 | { | |
3114 | ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; | |
3115 | return ata_bmdma_port_start(ap); | |
3116 | } | |
3117 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); | |
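/*
 * Example (illustrative sketch): a hypothetical driver for a controller
 * with a 32-bit-capable PIO data register would select the 32-bit variant
 * of port_start while inheriting everything else from ata_bmdma_port_ops.
 */
static struct ata_port_operations foo32_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.port_start	= ata_bmdma_port_start32,
};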
3118 | ||
9f2f7210 TH |
3119 | #ifdef CONFIG_PCI |
3120 | ||
3121 | /** | |
3122 | * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex | |
3123 | * @pdev: PCI device | |
3124 | * | |
3125 | * Some PCI ATA devices report simplex mode but in fact can be told to | |
3126 | * enter non-simplex mode. This implements the necessary logic to | |
3127 | * perform the task on such devices. Calling it on other devices | |
3128 | * results in -undefined- behaviour. | |
3129 | */ | |
3130 | int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) | |
3131 | { | |
3132 | unsigned long bmdma = pci_resource_start(pdev, 4); | |
3133 | u8 simplex; | |
3134 | ||
3135 | if (bmdma == 0) | |
3136 | return -ENOENT; | |
3137 | ||
3138 | simplex = inb(bmdma + 0x02); | |
3139 | outb(simplex & 0x60, bmdma + 0x02); | |
3140 | simplex = inb(bmdma + 0x02); | |
3141 | if (simplex & 0x80) | |
3142 | return -EOPNOTSUPP; | |
3143 | return 0; | |
3144 | } | |
3145 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); | |
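/*
 * Example (illustrative sketch): a probe-time helper in a hypothetical
 * driver for a chip known to falsely advertise simplex operation.  If the
 * flag cannot be cleared, libata will simply restrict DMA to a single
 * channel, so failure is only worth a notice.
 */
static void foo_try_full_duplex(struct pci_dev *pdev)
{
	if (ata_pci_bmdma_clear_simplex(pdev))
		dev_printk(KERN_NOTICE, &pdev->dev,
			   "simplex flag did not clear, DMA limited to one channel\n");
}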
3146 | ||
c7087652 TH |
3147 | static void ata_bmdma_nodma(struct ata_host *host, const char *reason) |
3148 | { | |
3149 | int i; | |
3150 | ||
3151 | dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n", | |
3152 | reason); | |
3153 | ||
3154 | for (i = 0; i < 2; i++) { | |
3155 | host->ports[i]->mwdma_mask = 0; | |
3156 | host->ports[i]->udma_mask = 0; | |
3157 | } | |
3158 | } | |
3159 | ||
9f2f7210 TH |
3160 | /** |
3161 | * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host | |
3162 | * @host: target ATA host | |
3163 | * | |
3164 | * Acquire PCI BMDMA resources and initialize @host accordingly. | |
3165 | * | |
3166 | * LOCKING: | |
3167 | * Inherited from calling layer (may sleep). | |
9f2f7210 | 3168 | */ |
c7087652 | 3169 | void ata_pci_bmdma_init(struct ata_host *host) |
9f2f7210 TH |
3170 | { |
3171 | struct device *gdev = host->dev; | |
3172 | struct pci_dev *pdev = to_pci_dev(gdev); | |
3173 | int i, rc; | |
3174 | ||
3175 | /* No BAR4 allocation: No DMA */ | |
c7087652 TH |
3176 | if (pci_resource_start(pdev, 4) == 0) { |
3177 | ata_bmdma_nodma(host, "BAR4 is zero"); | |
3178 | return; | |
3179 | } | |
9f2f7210 | 3180 | |
c7087652 TH |
3181 | /* |
3182 | * Some controllers require BMDMA region to be initialized | |
3183 | * even if DMA is not in use to clear IRQ status via | |
3184 | * ->sff_irq_clear method. Try to initialize bmdma_addr | |
3185 | * regardless of dma masks. | |
3186 | */ | |
9f2f7210 TH |
3187 | rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); |
3188 | if (rc) | |
c7087652 TH |
3189 | ata_bmdma_nodma(host, "failed to set dma mask"); |
3190 | if (!rc) { | |
3191 | rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); | |
3192 | if (rc) | |
3193 | ata_bmdma_nodma(host, | |
3194 | "failed to set consistent dma mask"); | |
3195 | } | |
9f2f7210 TH |
3196 | |
3197 | /* request and iomap DMA region */ | |
3198 | rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); | |
3199 | if (rc) { | |
c7087652 TH |
3200 | ata_bmdma_nodma(host, "failed to request/iomap BAR4"); |
3201 | return; | |
9f2f7210 TH |
3202 | } |
3203 | host->iomap = pcim_iomap_table(pdev); | |
3204 | ||
3205 | for (i = 0; i < 2; i++) { | |
3206 | struct ata_port *ap = host->ports[i]; | |
3207 | void __iomem *bmdma = host->iomap[4] + 8 * i; | |
3208 | ||
3209 | if (ata_port_is_dummy(ap)) | |
3210 | continue; | |
3211 | ||
3212 | ap->ioaddr.bmdma_addr = bmdma; | |
3213 | if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && | |
3214 | (ioread8(bmdma + 2) & 0x80)) | |
3215 | host->flags |= ATA_HOST_SIMPLEX; | |
3216 | ||
3217 | ata_port_desc(ap, "bmdma 0x%llx", | |
3218 | (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); | |
3219 | } | |
9f2f7210 TH |
3220 | } |
3221 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); | |
3222 | ||
1c5afdf7 TH |
3223 | /** |
3224 | * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host | |
3225 | * @pdev: target PCI device | |
3226 | * @ppi: array of port_info, must be enough for two ports | |
3227 | * @r_host: out argument for the initialized ATA host | |
3228 | * | |
3229 | * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI | |
3230 | * resources and initialize it accordingly in one go. | |
3231 | * | |
3232 | * LOCKING: | |
3233 | * Inherited from calling layer (may sleep). | |
3234 | * | |
3235 | * RETURNS: | |
3236 | * 0 on success, -errno otherwise. | |
3237 | */ | |
3238 | int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, | |
3239 | const struct ata_port_info * const * ppi, | |
3240 | struct ata_host **r_host) | |
3241 | { | |
3242 | int rc; | |
3243 | ||
3244 | rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); | |
3245 | if (rc) | |
3246 | return rc; | |
3247 | ||
3248 | ata_pci_bmdma_init(*r_host); | |
3249 | return 0; | |
3250 | } | |
3251 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); | |
3252 | ||
3253 | /** | |
3254 | * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller | |
3255 | * @pdev: Controller to be initialized | |
3256 | * @ppi: array of port_info, must be enough for two ports | |
3257 | * @sht: scsi_host_template to use when registering the host | |
3258 | * @host_priv: host private_data | |
3259 | * @hflags: host flags | |
3260 | * | |
3261 | * This function is similar to ata_pci_sff_init_one() but also | |
3262 | * takes care of BMDMA initialization. | |
3263 | * | |
3264 | * LOCKING: | |
3265 | * Inherited from PCI layer (may sleep). | |
3266 | * | |
3267 | * RETURNS: | |
3268 | * Zero on success, negative errno value on error. | |
3269 | */ | |
3270 | int ata_pci_bmdma_init_one(struct pci_dev *pdev, | |
3271 | const struct ata_port_info * const * ppi, | |
3272 | struct scsi_host_template *sht, void *host_priv, | |
3273 | int hflags) | |
3274 | { | |
3275 | struct device *dev = &pdev->dev; | |
3276 | const struct ata_port_info *pi; | |
3277 | struct ata_host *host = NULL; | |
3278 | int rc; | |
3279 | ||
3280 | DPRINTK("ENTER\n"); | |
3281 | ||
3282 | pi = ata_sff_find_valid_pi(ppi); | |
3283 | if (!pi) { | |
3284 | dev_printk(KERN_ERR, &pdev->dev, | |
3285 | "no valid port_info specified\n"); | |
3286 | return -EINVAL; | |
3287 | } | |
3288 | ||
3289 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | |
3290 | return -ENOMEM; | |
3291 | ||
3292 | rc = pcim_enable_device(pdev); | |
3293 | if (rc) | |
3294 | goto out; | |
3295 | ||
3296 | /* prepare and activate BMDMA host */ | |
3297 | rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); | |
3298 | if (rc) | |
3299 | goto out; | |
3300 | host->private_data = host_priv; | |
3301 | host->flags |= hflags; | |
3302 | ||
3303 | pci_set_master(pdev); | |
3304 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); | |
3305 | out: | |
3306 | if (rc == 0) | |
3307 | devres_remove_group(&pdev->dev, NULL); | |
3308 | else | |
3309 | devres_release_group(&pdev->dev, NULL); | |
3310 | ||
3311 | return rc; | |
3312 | } | |
3313 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); | |
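/*
 * Example (illustrative sketch): a minimal probe path for a hypothetical
 * "pata_foo" driver built entirely from the helpers above.  All "foo_"
 * names, the transfer-mode masks and the use of the ATA_BMDMA_SHT() macro
 * are assumptions made for the sake of the example; foo_init_one() would
 * be hooked up as the .probe member of the driver's struct pci_driver.
 */
static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT("pata_foo"),
};

static struct ata_port_operations foo_pci_ops = {
	.inherits	= &ata_bmdma_port_ops,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &foo_pci_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
}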
3314 | ||
9f2f7210 | 3315 | #endif /* CONFIG_PCI */ |
9a7780c9 | 3316 | #endif /* CONFIG_ATA_BMDMA */ |
270390e1 TH |
3317 | |
3318 | /** | |
3319 | * ata_sff_port_init - Initialize SFF/BMDMA ATA port | |
3320 | * @ap: Port to initialize | |
3321 | * | |
3322 | * Called on port allocation to initialize SFF/BMDMA specific | |
3323 | * fields. | |
3324 | * | |
3325 | * LOCKING: | |
3326 | * None. | |
3327 | */ | |
3328 | void ata_sff_port_init(struct ata_port *ap) | |
3329 | { | |
c429137a | 3330 | INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); |
5fe7454a TH |
3331 | ap->ctl = ATA_DEVCTL_OBS; |
3332 | ap->last_ctl = 0xFF; | |
270390e1 TH |
3333 | } |
3334 | ||
3335 | int __init ata_sff_init(void) | |
3336 | { | |
6370a6ad | 3337 | ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); |
c429137a TH |
3338 | if (!ata_sff_wq) |
3339 | return -ENOMEM; | |
3340 | ||
270390e1 TH |
3341 | return 0; |
3342 | } | |
3343 | ||
c43d559f | 3344 | void ata_sff_exit(void) |
270390e1 | 3345 | { |
c429137a | 3346 | destroy_workqueue(ata_sff_wq); |
270390e1 | 3347 | } |