am53c974: add new driver
[deliverable/linux.git] / drivers / scsi / esp_scsi.c
1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/irqreturn.h>
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
52#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
54
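/* The bits above can be OR-ed together in esp_debug to enable several
 * log classes at once; 0x5, for example, selects ESP_DEBUG_INTR |
 * ESP_DEBUG_RESET. (Presumably esp_debug is meant to be set through a
 * module parameter; only its declaration is visible here.)
 */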
55#define esp_log_intr(f, a...) \
56do { if (esp_debug & ESP_DEBUG_INTR) \
57	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
58} while (0)
59
60#define esp_log_reset(f, a...) \
61do { if (esp_debug & ESP_DEBUG_RESET) \
62	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
63} while (0)
64
65#define esp_log_msgin(f, a...) \
66do { if (esp_debug & ESP_DEBUG_MSGIN) \
67	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
68} while (0)
69
70#define esp_log_msgout(f, a...) \
71do { if (esp_debug & ESP_DEBUG_MSGOUT) \
72	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
73} while (0)
74
75#define esp_log_cmddone(f, a...) \
76do { if (esp_debug & ESP_DEBUG_CMDDONE) \
77	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
78} while (0)
79
80#define esp_log_disconnect(f, a...) \
81do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
82	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
83} while (0)
84
85#define esp_log_datastart(f, a...) \
86do { if (esp_debug & ESP_DEBUG_DATASTART) \
87	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
88} while (0)
89
90#define esp_log_datadone(f, a...) \
91do { if (esp_debug & ESP_DEBUG_DATADONE) \
92	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
93} while (0)
94
95#define esp_log_reconnect(f, a...) \
96do { if (esp_debug & ESP_DEBUG_RECONNECT) \
97	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
98} while (0)
99
100#define esp_log_autosense(f, a...) \
101do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
102	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
103} while (0)
104
105#define esp_log_event(f, a...) \
106do { if (esp_debug & ESP_DEBUG_EVENT) \
107 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108} while (0)
109
110#define esp_log_command(f, a...) \
111do { if (esp_debug & ESP_DEBUG_COMMAND) \
112 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
113} while (0)
114
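/* Each esp_log_* macro above is wrapped in do { ... } while (0) so that
 * it expands to exactly one statement and stays safe inside un-braced
 * if/else bodies; the "## a" token pasting lets the argument list be
 * empty.
 */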
115#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
116#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
117
118static void esp_log_fill_regs(struct esp *esp,
119 struct esp_event_ent *p)
120{
121 p->sreg = esp->sreg;
122 p->seqreg = esp->seqreg;
123 p->sreg2 = esp->sreg2;
124 p->ireg = esp->ireg;
125 p->select_state = esp->select_state;
126 p->event = esp->event;
127}
128
129void scsi_esp_cmd(struct esp *esp, u8 val)
130{
131 struct esp_event_ent *p;
132 int idx = esp->esp_event_cur;
133
134 p = &esp->esp_event_log[idx];
135 p->type = ESP_EVENT_TYPE_CMD;
136 p->val = val;
137 esp_log_fill_regs(esp, p);
138
139 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
140
141	esp_log_command("cmd[%02x]\n", val);
142 esp_write8(val, ESP_CMD);
143}
144EXPORT_SYMBOL(scsi_esp_cmd);
145
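/* Note that the event log is a ring buffer indexed with
 * "(idx + 1) & (ESP_EVENT_LOG_SZ - 1)", which only wraps correctly if
 * ESP_EVENT_LOG_SZ is a power of two.
 */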
146static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
147{
148 if (esp->flags & ESP_FLAG_USE_FIFO) {
149 int i;
150
151 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
152 for (i = 0; i < len; i++)
153 esp_write8(esp->command_block[i], ESP_FDATA);
154 scsi_esp_cmd(esp, cmd);
155 } else {
156 if (esp->rev == FASHME)
157 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
158 cmd |= ESP_CMD_DMA;
159 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
160 len, max_len, 0, cmd);
161 }
162}
163
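/* esp_send_dma_cmd() above has two paths: with ESP_FLAG_USE_FIFO the
 * command bytes are pushed through the FIFO by PIO, otherwise the
 * prepared command block is handed to the bus glue's send_dma_cmd()
 * with ESP_CMD_DMA OR-ed into the chip command.
 */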
164static void esp_event(struct esp *esp, u8 val)
165{
166 struct esp_event_ent *p;
167 int idx = esp->esp_event_cur;
168
169 p = &esp->esp_event_log[idx];
170 p->type = ESP_EVENT_TYPE_EVENT;
171 p->val = val;
172 esp_log_fill_regs(esp, p);
173
174 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
175
176 esp->event = val;
177}
178
179static void esp_dump_cmd_log(struct esp *esp)
180{
181 int idx = esp->esp_event_cur;
182 int stop = idx;
183
184	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
185 do {
186 struct esp_event_ent *p = &esp->esp_event_log[idx];
187
188 shost_printk(KERN_INFO, esp->host,
189 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
190 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
191 idx,
192 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
193 p->val, p->sreg, p->seqreg,
194 p->sreg2, p->ireg, p->select_state, p->event);
195
196 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
197 } while (idx != stop);
198}
199
200static void esp_flush_fifo(struct esp *esp)
201{
202 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
203 if (esp->rev == ESP236) {
204 int lim = 1000;
205
206 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
207 if (--lim == 0) {
208				shost_printk(KERN_ALERT, esp->host,
209					     "ESP_FF_FBYTES will not clear!\n");
210 break;
211 }
212 udelay(1);
213 }
214 }
215}
216
217static void hme_read_fifo(struct esp *esp)
218{
219 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
220 int idx = 0;
221
222 while (fcnt--) {
223 esp->fifo[idx++] = esp_read8(ESP_FDATA);
224 esp->fifo[idx++] = esp_read8(ESP_FDATA);
225 }
226 if (esp->sreg2 & ESP_STAT2_F1BYTE) {
227 esp_write8(0, ESP_FDATA);
228 esp->fifo[idx++] = esp_read8(ESP_FDATA);
229 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
230 }
231 esp->fifo_cnt = idx;
232}
233
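/* The FASHME FIFO is two bytes wide, which is presumably why the loop
 * above drains two bytes per flag count, with ESP_STAT2_F1BYTE flagging
 * a dangling odd byte that has to be pulled out separately.
 */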
234static void esp_set_all_config3(struct esp *esp, u8 val)
235{
236 int i;
237
238 for (i = 0; i < ESP_MAX_TARGET; i++)
239 esp->target[i].esp_config3 = val;
240}
241
242/* Reset the ESP chip, _not_ the SCSI bus. */
243static void esp_reset_esp(struct esp *esp)
244{
245 u8 family_code, version;
246
247 /* Now reset the ESP chip */
248 scsi_esp_cmd(esp, ESP_CMD_RC);
249 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
250 if (esp->rev == FAST)
251 esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
252 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
253
254	/* This is the only point at which it is reliable to read
255	 * the ID code for the fast ESP chip variants.
256	 */
257 esp->max_period = ((35 * esp->ccycle) / 1000);
258 if (esp->rev == FAST) {
259 version = esp_read8(ESP_UID);
260 family_code = (version & 0xf8) >> 3;
261 if (family_code == 0x02)
262 esp->rev = FAS236;
263 else if (family_code == 0x0a)
264 esp->rev = FASHME; /* Version is usually '5'. */
265 else
266 esp->rev = FAS100A;
267 esp->min_period = ((4 * esp->ccycle) / 1000);
268 } else {
269 esp->min_period = ((5 * esp->ccycle) / 1000);
270 }
271	esp->max_period = (esp->max_period + 3) >> 2;
272	esp->min_period = (esp->min_period + 3) >> 2;
273
274 esp_write8(esp->config1, ESP_CFG1);
275 switch (esp->rev) {
276 case ESP100:
277 /* nothing to do */
278 break;
279
280 case ESP100A:
281 esp_write8(esp->config2, ESP_CFG2);
282 break;
283
284 case ESP236:
285 /* Slow 236 */
286 esp_write8(esp->config2, ESP_CFG2);
287 esp->prev_cfg3 = esp->target[0].esp_config3;
288 esp_write8(esp->prev_cfg3, ESP_CFG3);
289 break;
290
291 case FASHME:
292 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
293 /* fallthrough... */
294
295 case FAS236:
296 /* Fast 236 or HME */
297 esp_write8(esp->config2, ESP_CFG2);
298 if (esp->rev == FASHME) {
299 u8 cfg3 = esp->target[0].esp_config3;
300
301 cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
302 if (esp->scsi_id >= 8)
303 cfg3 |= ESP_CONFIG3_IDBIT3;
304 esp_set_all_config3(esp, cfg3);
305 } else {
306 u32 cfg3 = esp->target[0].esp_config3;
307
308 cfg3 |= ESP_CONFIG3_FCLK;
309 esp_set_all_config3(esp, cfg3);
310 }
311 esp->prev_cfg3 = esp->target[0].esp_config3;
312 esp_write8(esp->prev_cfg3, ESP_CFG3);
313 if (esp->rev == FASHME) {
314 esp->radelay = 80;
315 } else {
316 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
317 esp->radelay = 0;
318 else
319 esp->radelay = 96;
320 }
321 break;
322
323 case FAS100A:
324 /* Fast 100a */
325 esp_write8(esp->config2, ESP_CFG2);
326 esp_set_all_config3(esp,
327 (esp->target[0].esp_config3 |
328 ESP_CONFIG3_FCLOCK));
329 esp->prev_cfg3 = esp->target[0].esp_config3;
330 esp_write8(esp->prev_cfg3, ESP_CFG3);
331 esp->radelay = 32;
332 break;
333
334 default:
335 break;
336 }
337
338 /* Reload the configuration registers */
339 esp_write8(esp->cfact, ESP_CFACT);
340
341 esp->prev_stp = 0;
342 esp_write8(esp->prev_stp, ESP_STP);
343
344 esp->prev_soff = 0;
345 esp_write8(esp->prev_soff, ESP_SOFF);
346
347 esp_write8(esp->neg_defp, ESP_TIMEO);
348
349 /* Eat any bitrot in the chip */
350 esp_read8(ESP_INTRPT);
351 udelay(100);
352}
353
354static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
355{
356 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
357	struct scatterlist *sg = scsi_sglist(cmd);
358 int dir = cmd->sc_data_direction;
359 int total, i;
360
361 if (dir == DMA_NONE)
362 return;
363
364	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
365 spriv->cur_residue = sg_dma_len(sg);
366 spriv->cur_sg = sg;
367
368 total = 0;
369 for (i = 0; i < spriv->u.num_sg; i++)
370 total += sg_dma_len(&sg[i]);
371 spriv->tot_residue = total;
372}
373
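/* cur_residue tracks the bytes left in the current scatterlist entry
 * and tot_residue the bytes left in the whole transfer;
 * esp_advance_dma() below decrements both and steps cur_sg when an
 * entry is exhausted.
 */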
374static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
375 struct scsi_cmnd *cmd)
376{
377 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
378
379 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
380 return ent->sense_dma +
381 (ent->sense_ptr - cmd->sense_buffer);
382 }
383
384 return sg_dma_address(p->cur_sg) +
385 (sg_dma_len(p->cur_sg) -
386 p->cur_residue);
387}
388
389static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
390 struct scsi_cmnd *cmd)
391{
392 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
393
394 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
395 return SCSI_SENSE_BUFFERSIZE -
396 (ent->sense_ptr - cmd->sense_buffer);
397 }
398 return p->cur_residue;
399}
400
401static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
402 struct scsi_cmnd *cmd, unsigned int len)
403{
404 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
405
406 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
407 ent->sense_ptr += len;
408 return;
409 }
410
411 p->cur_residue -= len;
412 p->tot_residue -= len;
413 if (p->cur_residue < 0 || p->tot_residue < 0) {
414 shost_printk(KERN_ERR, esp->host,
415 "Data transfer overflow.\n");
416 shost_printk(KERN_ERR, esp->host,
417 "cur_residue[%d] tot_residue[%d] len[%u]\n",
418 p->cur_residue, p->tot_residue, len);
419 p->cur_residue = 0;
420 p->tot_residue = 0;
421 }
422 if (!p->cur_residue && p->tot_residue) {
423 p->cur_sg++;
424 p->cur_residue = sg_dma_len(p->cur_sg);
425 }
426}
427
428static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
429{
430 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
431 int dir = cmd->sc_data_direction;
432
433 if (dir == DMA_NONE)
434 return;
435
436	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
437}
438
439static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
440{
441 struct scsi_cmnd *cmd = ent->cmd;
442 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
443
444 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
445 ent->saved_sense_ptr = ent->sense_ptr;
446 return;
447 }
448 ent->saved_cur_residue = spriv->cur_residue;
449 ent->saved_cur_sg = spriv->cur_sg;
450 ent->saved_tot_residue = spriv->tot_residue;
451}
452
453static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
454{
455 struct scsi_cmnd *cmd = ent->cmd;
456 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
457
458 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
459 ent->sense_ptr = ent->saved_sense_ptr;
460 return;
461 }
462 spriv->cur_residue = ent->saved_cur_residue;
463 spriv->cur_sg = ent->saved_cur_sg;
464 spriv->tot_residue = ent->saved_tot_residue;
465}
466
467static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
468{
469 if (cmd->cmd_len == 6 ||
470 cmd->cmd_len == 10 ||
471 cmd->cmd_len == 12) {
472 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
473 } else {
474 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
475 }
476}
477
478static void esp_write_tgt_config3(struct esp *esp, int tgt)
479{
480 if (esp->rev > ESP100A) {
481 u8 val = esp->target[tgt].esp_config3;
482
483 if (val != esp->prev_cfg3) {
484 esp->prev_cfg3 = val;
485 esp_write8(val, ESP_CFG3);
486 }
487 }
488}
489
490static void esp_write_tgt_sync(struct esp *esp, int tgt)
491{
492 u8 off = esp->target[tgt].esp_offset;
493 u8 per = esp->target[tgt].esp_period;
494
495 if (off != esp->prev_soff) {
496 esp->prev_soff = off;
497 esp_write8(off, ESP_SOFF);
498 }
499 if (per != esp->prev_stp) {
500 esp->prev_stp = per;
501 esp_write8(per, ESP_STP);
502 }
503}
504
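/* prev_soff/prev_stp (and prev_cfg3 above) cache the last value written
 * to the chip so that per-target reprogramming can skip redundant
 * register writes.
 */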
505static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
506{
507 if (esp->rev == FASHME) {
508 /* Arbitrary segment boundaries, 24-bit counts. */
509 if (dma_len > (1U << 24))
510 dma_len = (1U << 24);
511 } else {
512 u32 base, end;
513
514		/* The ESP chip limits the other variants to a 16-bit
515		 * transfer count. On the FAS100A and FAS236 we could get
516		 * a 24-bit transfer count by enabling ESP_CONFIG2_FENAB
517		 * in the ESP_CFG2 register, but that causes other unwanted
518		 * changes, so we don't use it currently.
519		 */
520 if (dma_len > (1U << 16))
521 dma_len = (1U << 16);
522
523 /* All of the DMA variants hooked up to these chips
524 * cannot handle crossing a 24-bit address boundary.
525 */
526 base = dma_addr & ((1U << 24) - 1U);
527 end = base + dma_len;
528 if (end > (1U << 24))
529			end = (1U << 24);
530 dma_len = end - base;
531 }
532 return dma_len;
533}
534
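/* Worked example for the non-FASHME path: dma_addr = 0x00ffff00 with
 * dma_len = 0x200 gives base = 0xffff00, end clips to 1 << 24, and the
 * returned length is 0x100; the remaining bytes go out in a later chunk.
 */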
535static int esp_need_to_nego_wide(struct esp_target_data *tp)
536{
537 struct scsi_target *target = tp->starget;
538
539 return spi_width(target) != tp->nego_goal_width;
540}
541
542static int esp_need_to_nego_sync(struct esp_target_data *tp)
543{
544 struct scsi_target *target = tp->starget;
545
546 /* When offset is zero, period is "don't care". */
547 if (!spi_offset(target) && !tp->nego_goal_offset)
548 return 0;
549
550 if (spi_offset(target) == tp->nego_goal_offset &&
551 spi_period(target) == tp->nego_goal_period)
552 return 0;
553
554 return 1;
555}
556
557static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
558 struct esp_lun_data *lp)
559{
560	if (!ent->orig_tag[0]) {
561 /* Non-tagged, slot already taken? */
562 if (lp->non_tagged_cmd)
563 return -EBUSY;
564
565 if (lp->hold) {
566 /* We are being held by active tagged
567 * commands.
568 */
569 if (lp->num_tagged)
570 return -EBUSY;
571
572 /* Tagged commands completed, we can unplug
573 * the queue and run this untagged command.
574 */
575 lp->hold = 0;
576 } else if (lp->num_tagged) {
577 /* Plug the queue until num_tagged decreases
578 * to zero in esp_free_lun_tag.
579 */
580 lp->hold = 1;
581 return -EBUSY;
582 }
583
584 lp->non_tagged_cmd = ent;
585 return 0;
586 } else {
587 /* Tagged command, see if blocked by a
588 * non-tagged one.
589 */
590 if (lp->non_tagged_cmd || lp->hold)
591 return -EBUSY;
592 }
593
594	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
595
596	lp->tagged_cmds[ent->orig_tag[1]] = ent;
597 lp->num_tagged++;
598
599 return 0;
600}
601
602static void esp_free_lun_tag(struct esp_cmd_entry *ent,
603 struct esp_lun_data *lp)
604{
605 if (ent->orig_tag[0]) {
606 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
607 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
608 lp->num_tagged--;
609 } else {
610 BUG_ON(lp->non_tagged_cmd != ent);
611 lp->non_tagged_cmd = NULL;
612 }
613}
614
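/* Taken together, esp_alloc_lun_tag() and esp_free_lun_tag() implement
 * a small per-LUN interlock: an untagged command owns the LUN
 * exclusively, and lp->hold plugs the issue queue until all in-flight
 * tagged commands drain before an untagged one is let through.
 */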
615/* When a contingent allegiance condition is created, we force-feed a
616 * REQUEST_SENSE command to the device to fetch the sense data. I
617 * tried many other schemes, relying on the scsi error handling layer
618 * to send out the REQUEST_SENSE automatically, but this was difficult
619 * to get right especially in the presence of applications like smartd
620 * which use SG_IO to send out their own REQUEST_SENSE commands.
621 */
622static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
623{
624 struct scsi_cmnd *cmd = ent->cmd;
625 struct scsi_device *dev = cmd->device;
626 int tgt, lun;
627 u8 *p, val;
628
629 tgt = dev->id;
630 lun = dev->lun;
631
632
633 if (!ent->sense_ptr) {
634 esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
635 tgt, lun);
636
637 ent->sense_ptr = cmd->sense_buffer;
638 ent->sense_dma = esp->ops->map_single(esp,
639 ent->sense_ptr,
640 SCSI_SENSE_BUFFERSIZE,
641 DMA_FROM_DEVICE);
642 }
643 ent->saved_sense_ptr = ent->sense_ptr;
644
645 esp->active_cmd = ent;
646
647 p = esp->command_block;
648 esp->msg_out_len = 0;
649
650 *p++ = IDENTIFY(0, lun);
651 *p++ = REQUEST_SENSE;
652 *p++ = ((dev->scsi_level <= SCSI_2) ?
653 (lun << 5) : 0);
654 *p++ = 0;
655 *p++ = 0;
656 *p++ = SCSI_SENSE_BUFFERSIZE;
657 *p++ = 0;
658
659 esp->select_state = ESP_SELECT_BASIC;
660
661 val = tgt;
662 if (esp->rev == FASHME)
663 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
664 esp_write8(val, ESP_BUSID);
665
666 esp_write_tgt_sync(esp, tgt);
667 esp_write_tgt_config3(esp, tgt);
668
669 val = (p - esp->command_block);
670
671	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
672}
673
674static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
675{
676 struct esp_cmd_entry *ent;
677
678 list_for_each_entry(ent, &esp->queued_cmds, list) {
679 struct scsi_cmnd *cmd = ent->cmd;
680 struct scsi_device *dev = cmd->device;
681 struct esp_lun_data *lp = dev->hostdata;
682
683 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
684 ent->tag[0] = 0;
685 ent->tag[1] = 0;
686 return ent;
687 }
688
689		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
690 ent->tag[0] = 0;
691 ent->tag[1] = 0;
692 }
693 ent->orig_tag[0] = ent->tag[0];
694 ent->orig_tag[1] = ent->tag[1];
695
696 if (esp_alloc_lun_tag(ent, lp) < 0)
697 continue;
698
699 return ent;
700 }
701
702 return NULL;
703}
704
705static void esp_maybe_execute_command(struct esp *esp)
706{
707 struct esp_target_data *tp;
708 struct esp_lun_data *lp;
709 struct scsi_device *dev;
710 struct scsi_cmnd *cmd;
711 struct esp_cmd_entry *ent;
712 int tgt, lun, i;
713 u32 val, start_cmd;
714 u8 *p;
715
716 if (esp->active_cmd ||
717 (esp->flags & ESP_FLAG_RESETTING))
718 return;
719
720 ent = find_and_prep_issuable_command(esp);
721 if (!ent)
722 return;
723
724 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
725 esp_autosense(esp, ent);
726 return;
727 }
728
729 cmd = ent->cmd;
730 dev = cmd->device;
731 tgt = dev->id;
732 lun = dev->lun;
733 tp = &esp->target[tgt];
734 lp = dev->hostdata;
735
736	list_move(&ent->list, &esp->active_cmds);
737
738 esp->active_cmd = ent;
739
740 esp_map_dma(esp, cmd);
741 esp_save_pointers(esp, ent);
742
743 esp_check_command_len(esp, cmd);
744
745 p = esp->command_block;
746
747 esp->msg_out_len = 0;
748 if (tp->flags & ESP_TGT_CHECK_NEGO) {
749 /* Need to negotiate. If the target is broken
750 * go for synchronous transfers and non-wide.
751 */
752 if (tp->flags & ESP_TGT_BROKEN) {
753 tp->flags &= ~ESP_TGT_DISCONNECT;
754 tp->nego_goal_period = 0;
755 tp->nego_goal_offset = 0;
756 tp->nego_goal_width = 0;
757 tp->nego_goal_tags = 0;
758 }
759
760 /* If the settings are not changing, skip this. */
761 if (spi_width(tp->starget) == tp->nego_goal_width &&
762 spi_period(tp->starget) == tp->nego_goal_period &&
763 spi_offset(tp->starget) == tp->nego_goal_offset) {
764 tp->flags &= ~ESP_TGT_CHECK_NEGO;
765 goto build_identify;
766 }
767
768 if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
769 esp->msg_out_len =
770 spi_populate_width_msg(&esp->msg_out[0],
771 (tp->nego_goal_width ?
772 1 : 0));
773 tp->flags |= ESP_TGT_NEGO_WIDE;
774 } else if (esp_need_to_nego_sync(tp)) {
775 esp->msg_out_len =
776 spi_populate_sync_msg(&esp->msg_out[0],
777 tp->nego_goal_period,
778 tp->nego_goal_offset);
779 tp->flags |= ESP_TGT_NEGO_SYNC;
780 } else {
781 tp->flags &= ~ESP_TGT_CHECK_NEGO;
782 }
783
784 /* Process it like a slow command. */
785 if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
786 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
787 }
788
789build_identify:
790 /* If we don't have a lun-data struct yet, we're probing
791 * so do not disconnect. Also, do not disconnect unless
792 * we have a tag on this command.
793 */
794 if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
795 *p++ = IDENTIFY(1, lun);
796 else
797 *p++ = IDENTIFY(0, lun);
798
799 if (ent->tag[0] && esp->rev == ESP100) {
800 /* ESP100 lacks select w/atn3 command, use select
801 * and stop instead.
802 */
803 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
804 }
805
806 if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
807		start_cmd = ESP_CMD_SELA;
808 if (ent->tag[0]) {
809 *p++ = ent->tag[0];
810 *p++ = ent->tag[1];
811
812			start_cmd = ESP_CMD_SA3;
813 }
814
815 for (i = 0; i < cmd->cmd_len; i++)
816 *p++ = cmd->cmnd[i];
817
818 esp->select_state = ESP_SELECT_BASIC;
819 } else {
820 esp->cmd_bytes_left = cmd->cmd_len;
821 esp->cmd_bytes_ptr = &cmd->cmnd[0];
822
823 if (ent->tag[0]) {
824 for (i = esp->msg_out_len - 1;
825 i >= 0; i--)
826 esp->msg_out[i + 2] = esp->msg_out[i];
827 esp->msg_out[0] = ent->tag[0];
828 esp->msg_out[1] = ent->tag[1];
829 esp->msg_out_len += 2;
830 }
831
832		start_cmd = ESP_CMD_SELAS;
833 esp->select_state = ESP_SELECT_MSGOUT;
834 }
835 val = tgt;
836 if (esp->rev == FASHME)
837 val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
838 esp_write8(val, ESP_BUSID);
839
840 esp_write_tgt_sync(esp, tgt);
841 esp_write_tgt_config3(esp, tgt);
842
843 val = (p - esp->command_block);
844
845 if (esp_debug & ESP_DEBUG_SCSICMD) {
846 printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
847 for (i = 0; i < cmd->cmd_len; i++)
848 printk("%02x ", cmd->cmnd[i]);
849 printk("]\n");
850 }
851
852	esp_send_dma_cmd(esp, val, 16, start_cmd);
853}
854
855static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
856{
857 struct list_head *head = &esp->esp_cmd_pool;
858 struct esp_cmd_entry *ret;
859
860 if (list_empty(head)) {
861 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
862 } else {
863 ret = list_entry(head->next, struct esp_cmd_entry, list);
864 list_del(&ret->list);
865 memset(ret, 0, sizeof(*ret));
866 }
867 return ret;
868}
869
870static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
871{
872 list_add(&ent->list, &esp->esp_cmd_pool);
873}
874
875static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
876 struct scsi_cmnd *cmd, unsigned int result)
877{
878 struct scsi_device *dev = cmd->device;
879 int tgt = dev->id;
880 int lun = dev->lun;
881
882 esp->active_cmd = NULL;
883 esp_unmap_dma(esp, cmd);
884 esp_free_lun_tag(ent, dev->hostdata);
885 cmd->result = result;
886
887 if (ent->eh_done) {
888 complete(ent->eh_done);
889 ent->eh_done = NULL;
890 }
891
892 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
893 esp->ops->unmap_single(esp, ent->sense_dma,
894 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
895 ent->sense_ptr = NULL;
896
897 /* Restore the message/status bytes to what we actually
898 * saw originally. Also, report that we are providing
899 * the sense data.
900 */
901 cmd->result = ((DRIVER_SENSE << 24) |
902 (DID_OK << 16) |
903 (COMMAND_COMPLETE << 8) |
904 (SAM_STAT_CHECK_CONDITION << 0));
905
906 ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
907 if (esp_debug & ESP_DEBUG_AUTOSENSE) {
908 int i;
909
910 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
911 esp->host->unique_id, tgt, lun);
912 for (i = 0; i < 18; i++)
913 printk("%02x ", cmd->sense_buffer[i]);
914 printk("]\n");
915 }
916 }
917
918 cmd->scsi_done(cmd);
919
920 list_del(&ent->list);
921 esp_put_ent(esp, ent);
922
923 esp_maybe_execute_command(esp);
924}
925
926static unsigned int compose_result(unsigned int status, unsigned int message,
927 unsigned int driver_code)
928{
929 return (status | (message << 8) | (driver_code << 16));
930}
931
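/* For example, a clean CHECK CONDITION completion would be
 * compose_result(SAM_STAT_CHECK_CONDITION, COMMAND_COMPLETE, DID_OK),
 * i.e. 0x00000002, since the message and driver bytes are both zero.
 */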
932static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
933{
934 struct scsi_device *dev = ent->cmd->device;
935 struct esp_lun_data *lp = dev->hostdata;
936
937 scsi_track_queue_full(dev, lp->num_tagged - 1);
938}
939
940static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
941{
942 struct scsi_device *dev = cmd->device;
943	struct esp *esp = shost_priv(dev->host);
944 struct esp_cmd_priv *spriv;
945 struct esp_cmd_entry *ent;
946
947 ent = esp_get_ent(esp);
948 if (!ent)
949 return SCSI_MLQUEUE_HOST_BUSY;
950
951 ent->cmd = cmd;
952
953 cmd->scsi_done = done;
954
955 spriv = ESP_CMD_PRIV(cmd);
956 spriv->u.dma_addr = ~(dma_addr_t)0x0;
957
958 list_add_tail(&ent->list, &esp->queued_cmds);
959
960 esp_maybe_execute_command(esp);
961
962 return 0;
963}
964
965static DEF_SCSI_QCMD(esp_queuecommand)
966
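/* DEF_SCSI_QCMD() generates the esp_queuecommand() entry point as a
 * wrapper that takes the host lock and then calls
 * esp_queuecommand_lck() above.
 */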
967static int esp_check_gross_error(struct esp *esp)
968{
969 if (esp->sreg & ESP_STAT_SPAM) {
970 /* Gross Error, could be one of:
971 * - top of fifo overwritten
972 * - top of command register overwritten
973 * - DMA programmed with wrong direction
974 * - improper phase change
975 */
976 shost_printk(KERN_ERR, esp->host,
977 "Gross error sreg[%02x]\n", esp->sreg);
978 /* XXX Reset the chip. XXX */
979 return 1;
980 }
981 return 0;
982}
983
984static int esp_check_spur_intr(struct esp *esp)
985{
986 switch (esp->rev) {
987 case ESP100:
988 case ESP100A:
989 /* The interrupt pending bit of the status register cannot
990 * be trusted on these revisions.
991 */
992 esp->sreg &= ~ESP_STAT_INTR;
993 break;
994
995 default:
996 if (!(esp->sreg & ESP_STAT_INTR)) {
997 if (esp->ireg & ESP_INTR_SR)
998 return 1;
999
1000 /* If the DMA is indicating interrupt pending and the
1001 * ESP is not, the only possibility is a DMA error.
1002 */
1003 if (!esp->ops->dma_error(esp)) {
1004 shost_printk(KERN_ERR, esp->host,
1005 "Spurious irq, sreg=%02x.\n",
1006 esp->sreg);
1007 return -1;
1008 }
1009
1010			shost_printk(KERN_ERR, esp->host, "DMA error\n");
1011
1012 /* XXX Reset the chip. XXX */
1013 return -1;
1014 }
1015 break;
1016 }
1017
1018 return 0;
1019}
1020
1021static void esp_schedule_reset(struct esp *esp)
1022{
1023	esp_log_reset("esp_schedule_reset() from %pf\n",
1024 __builtin_return_address(0));
1025 esp->flags |= ESP_FLAG_RESETTING;
1026 esp_event(esp, ESP_EVENT_RESET);
1027}
1028
1029/* In order to avoid having to add a special half-reconnected state
1030 * into the driver we just sit here and poll through the rest of
1031 * the reselection process to get the tag message bytes.
1032 */
1033static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1034 struct esp_lun_data *lp)
1035{
1036 struct esp_cmd_entry *ent;
1037 int i;
1038
1039 if (!lp->num_tagged) {
1040 shost_printk(KERN_ERR, esp->host,
1041 "Reconnect w/num_tagged==0\n");
1042 return NULL;
1043 }
1044
1045	esp_log_reconnect("reconnect tag, ");
1046
1047 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1048 if (esp->ops->irq_pending(esp))
1049 break;
1050 }
1051 if (i == ESP_QUICKIRQ_LIMIT) {
1052 shost_printk(KERN_ERR, esp->host,
1053 "Reconnect IRQ1 timeout\n");
1054 return NULL;
1055 }
1056
1057 esp->sreg = esp_read8(ESP_STATUS);
1058 esp->ireg = esp_read8(ESP_INTRPT);
1059
1060 esp_log_reconnect("IRQ(%d:%x:%x), ",
1061 i, esp->ireg, esp->sreg);
1062
1063 if (esp->ireg & ESP_INTR_DC) {
1064 shost_printk(KERN_ERR, esp->host,
1065 "Reconnect, got disconnect.\n");
1066 return NULL;
1067 }
1068
1069 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1070 shost_printk(KERN_ERR, esp->host,
1071 "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
1072 return NULL;
1073 }
1074
1075 /* DMA in the tag bytes... */
1076 esp->command_block[0] = 0xff;
1077 esp->command_block[1] = 0xff;
1078 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1079 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
1080
1081	/* ACK the message.  */
1082 scsi_esp_cmd(esp, ESP_CMD_MOK);
1083
1084 for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
1085 if (esp->ops->irq_pending(esp)) {
1086 esp->sreg = esp_read8(ESP_STATUS);
1087 esp->ireg = esp_read8(ESP_INTRPT);
1088 if (esp->ireg & ESP_INTR_FDONE)
1089 break;
1090 }
1091 udelay(1);
1092 }
1093 if (i == ESP_RESELECT_TAG_LIMIT) {
1094		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
1095 return NULL;
1096 }
1097 esp->ops->dma_drain(esp);
1098 esp->ops->dma_invalidate(esp);
1099
1100 esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
1101 i, esp->ireg, esp->sreg,
1102 esp->command_block[0],
1103 esp->command_block[1]);
1104
1105 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1106 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1107 shost_printk(KERN_ERR, esp->host,
1108 "Reconnect, bad tag type %02x.\n",
1109 esp->command_block[0]);
1110 return NULL;
1111 }
1112
1113 ent = lp->tagged_cmds[esp->command_block[1]];
1114 if (!ent) {
1115 shost_printk(KERN_ERR, esp->host,
1116 "Reconnect, no entry for tag %02x.\n",
1117 esp->command_block[1]);
1118 return NULL;
1119 }
1120
1121 return ent;
1122}
1123
1124static int esp_reconnect(struct esp *esp)
1125{
1126 struct esp_cmd_entry *ent;
1127 struct esp_target_data *tp;
1128 struct esp_lun_data *lp;
1129 struct scsi_device *dev;
1130 int target, lun;
1131
1132 BUG_ON(esp->active_cmd);
1133 if (esp->rev == FASHME) {
1134 /* FASHME puts the target and lun numbers directly
1135 * into the fifo.
1136 */
1137 target = esp->fifo[0];
1138 lun = esp->fifo[1] & 0x7;
1139 } else {
1140 u8 bits = esp_read8(ESP_FDATA);
1141
1142 /* Older chips put the lun directly into the fifo, but
1143 * the target is given as a sample of the arbitration
1144 * lines on the bus at reselection time. So we should
1145 * see the ID of the ESP and the one reconnecting target
1146 * set in the bitmap.
1147 */
1148 if (!(bits & esp->scsi_id_mask))
1149 goto do_reset;
1150 bits &= ~esp->scsi_id_mask;
1151 if (!bits || (bits & (bits - 1)))
1152 goto do_reset;
1153
1154 target = ffs(bits) - 1;
1155 lun = (esp_read8(ESP_FDATA) & 0x7);
1156
1157 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1158 if (esp->rev == ESP100) {
1159 u8 ireg = esp_read8(ESP_INTRPT);
1160 /* This chip has a bug during reselection that can
1161 * cause a spurious illegal-command interrupt, which
1162 * we simply ACK here. Another possibility is a bus
1163 * reset so we must check for that.
1164 */
1165 if (ireg & ESP_INTR_SR)
1166 goto do_reset;
1167 }
1168 scsi_esp_cmd(esp, ESP_CMD_NULL);
1169 }
1170
1171 esp_write_tgt_sync(esp, target);
1172 esp_write_tgt_config3(esp, target);
1173
1174 scsi_esp_cmd(esp, ESP_CMD_MOK);
1175
1176 if (esp->rev == FASHME)
1177 esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
1178 ESP_BUSID);
1179
1180 tp = &esp->target[target];
1181 dev = __scsi_device_lookup_by_target(tp->starget, lun);
1182 if (!dev) {
1183 shost_printk(KERN_ERR, esp->host,
1184 "Reconnect, no lp tgt[%u] lun[%u]\n",
1185 target, lun);
1186 goto do_reset;
1187 }
1188 lp = dev->hostdata;
1189
1190 ent = lp->non_tagged_cmd;
1191 if (!ent) {
1192 ent = esp_reconnect_with_tag(esp, lp);
1193 if (!ent)
1194 goto do_reset;
1195 }
1196
1197 esp->active_cmd = ent;
1198
1199 if (ent->flags & ESP_CMD_FLAG_ABORT) {
1200 esp->msg_out[0] = ABORT_TASK_SET;
1201 esp->msg_out_len = 1;
1202 scsi_esp_cmd(esp, ESP_CMD_SATN);
1203 }
1204
1205 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1206 esp_restore_pointers(esp, ent);
1207 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1208 return 1;
1209
1210do_reset:
1211 esp_schedule_reset(esp);
1212 return 0;
1213}
1214
1215static int esp_finish_select(struct esp *esp)
1216{
1217 struct esp_cmd_entry *ent;
1218 struct scsi_cmnd *cmd;
1219 u8 orig_select_state;
1220
1221 orig_select_state = esp->select_state;
1222
1223 /* No longer selecting. */
1224 esp->select_state = ESP_SELECT_NONE;
1225
1226 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1227 ent = esp->active_cmd;
1228 cmd = ent->cmd;
1229
1230 if (esp->ops->dma_error(esp)) {
1231 /* If we see a DMA error during or as a result of selection,
1232 * all bets are off.
1233 */
1234 esp_schedule_reset(esp);
1235 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1236 return 0;
1237 }
1238
1239 esp->ops->dma_invalidate(esp);
1240
1241 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1242 struct esp_target_data *tp = &esp->target[cmd->device->id];
1243
1244 /* Carefully back out of the selection attempt. Release
1245 * resources (such as DMA mapping & TAG) and reset state (such
1246 * as message out and command delivery variables).
1247 */
1248 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1249 esp_unmap_dma(esp, cmd);
1250 esp_free_lun_tag(ent, cmd->device->hostdata);
1251 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1252 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1253 esp->cmd_bytes_ptr = NULL;
1254 esp->cmd_bytes_left = 0;
1255 } else {
1256 esp->ops->unmap_single(esp, ent->sense_dma,
1257 SCSI_SENSE_BUFFERSIZE,
1258 DMA_FROM_DEVICE);
1259 ent->sense_ptr = NULL;
1260 }
1261
1262 /* Now that the state is unwound properly, put back onto
1263 * the issue queue. This command is no longer active.
1264 */
1265		list_move(&ent->list, &esp->queued_cmds);
1266 esp->active_cmd = NULL;
1267
1268 /* Return value ignored by caller, it directly invokes
1269 * esp_reconnect().
1270 */
1271 return 0;
1272 }
1273
1274 if (esp->ireg == ESP_INTR_DC) {
1275 struct scsi_device *dev = cmd->device;
1276
1277 /* Disconnect. Make sure we re-negotiate sync and
1278 * wide parameters if this target starts responding
1279 * again in the future.
1280 */
1281 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1282
1283 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1284 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1285 return 1;
1286 }
1287
1288 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1289 /* Selection successful. On pre-FAST chips we have
1290 * to do a NOP and possibly clean out the FIFO.
1291 */
1292 if (esp->rev <= ESP236) {
1293 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1294
1295 scsi_esp_cmd(esp, ESP_CMD_NULL);
1296
1297 if (!fcnt &&
1298 (!esp->prev_soff ||
1299 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1300 esp_flush_fifo(esp);
1301 }
1302
1303 /* If we are doing a slow command, negotiation, etc.
1304 * we'll do the right thing as we transition to the
1305 * next phase.
1306 */
1307 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1308 return 0;
1309 }
1310
1311 shost_printk(KERN_INFO, esp->host,
1312 "Unexpected selection completion ireg[%x]\n", esp->ireg);
1313 esp_schedule_reset(esp);
1314 return 0;
1315}
1316
1317static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1318 struct scsi_cmnd *cmd)
1319{
1320 int fifo_cnt, ecount, bytes_sent, flush_fifo;
1321
1322 fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1323 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1324 fifo_cnt <<= 1;
1325
1326 ecount = 0;
1327 if (!(esp->sreg & ESP_STAT_TCNT)) {
1328 ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
1329 (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1330 if (esp->rev == FASHME)
1331 ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1332 }
1333
1334 bytes_sent = esp->data_dma_len;
1335 bytes_sent -= ecount;
1336
1337 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1338 bytes_sent -= fifo_cnt;
1339
1340 flush_fifo = 0;
1341 if (!esp->prev_soff) {
1342 /* Synchronous data transfer, always flush fifo. */
1343 flush_fifo = 1;
1344 } else {
1345 if (esp->rev == ESP100) {
1346 u32 fflags, phase;
1347
1348 /* ESP100 has a chip bug where in the synchronous data
1349 * phase it can mistake a final long REQ pulse from the
1350 * target as an extra data byte. Fun.
1351 *
1352 * To detect this case we resample the status register
1353 * and fifo flags. If we're still in a data phase and
1354 * we see spurious chunks in the fifo, we return error
1355 * to the caller which should reset and set things up
1356 * such that we only try future transfers to this
1357 * target in synchronous mode.
1358 */
1359 esp->sreg = esp_read8(ESP_STATUS);
1360 phase = esp->sreg & ESP_STAT_PMASK;
1361 fflags = esp_read8(ESP_FFLAGS);
1362
1363 if ((phase == ESP_DOP &&
1364 (fflags & ESP_FF_ONOTZERO)) ||
1365 (phase == ESP_DIP &&
1366 (fflags & ESP_FF_FBYTES)))
1367 return -1;
1368 }
1369 if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1370 flush_fifo = 1;
1371 }
1372
1373 if (flush_fifo)
1374 esp_flush_fifo(esp);
1375
1376 return bytes_sent;
1377}
1378
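/* In esp_data_bytes_sent() above, bytes_sent is the programmed DMA
 * length minus whatever is left in the chip's transfer counter and, on
 * data-out, minus bytes still sitting in the FIFO that never reached
 * the target.
 */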
1379static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1380 u8 scsi_period, u8 scsi_offset,
1381 u8 esp_stp, u8 esp_soff)
1382{
1383 spi_period(tp->starget) = scsi_period;
1384 spi_offset(tp->starget) = scsi_offset;
1385 spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
1386
1387 if (esp_soff) {
1388 esp_stp &= 0x1f;
1389 esp_soff |= esp->radelay;
1390 if (esp->rev >= FAS236) {
1391 u8 bit = ESP_CONFIG3_FSCSI;
1392 if (esp->rev >= FAS100A)
1393 bit = ESP_CONFIG3_FAST;
1394
1395 if (scsi_period < 50) {
1396 if (esp->rev == FASHME)
1397 esp_soff &= ~esp->radelay;
1398 tp->esp_config3 |= bit;
1399 } else {
1400 tp->esp_config3 &= ~bit;
1401 }
1402 esp->prev_cfg3 = tp->esp_config3;
1403 esp_write8(esp->prev_cfg3, ESP_CFG3);
1404 }
1405 }
1406
1407 tp->esp_period = esp->prev_stp = esp_stp;
1408 tp->esp_offset = esp->prev_soff = esp_soff;
1409
1410 esp_write8(esp_soff, ESP_SOFF);
1411 esp_write8(esp_stp, ESP_STP);
1412
1413 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1414
1415 spi_display_xfer_agreement(tp->starget);
1416}
1417
1418static void esp_msgin_reject(struct esp *esp)
1419{
1420 struct esp_cmd_entry *ent = esp->active_cmd;
1421 struct scsi_cmnd *cmd = ent->cmd;
1422 struct esp_target_data *tp;
1423 int tgt;
1424
1425 tgt = cmd->device->id;
1426 tp = &esp->target[tgt];
1427
1428 if (tp->flags & ESP_TGT_NEGO_WIDE) {
1429 tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
1430
1431 if (!esp_need_to_nego_sync(tp)) {
1432 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1433 scsi_esp_cmd(esp, ESP_CMD_RATN);
1434 } else {
1435 esp->msg_out_len =
1436 spi_populate_sync_msg(&esp->msg_out[0],
1437 tp->nego_goal_period,
1438 tp->nego_goal_offset);
1439 tp->flags |= ESP_TGT_NEGO_SYNC;
1440 scsi_esp_cmd(esp, ESP_CMD_SATN);
1441 }
1442 return;
1443 }
1444
1445 if (tp->flags & ESP_TGT_NEGO_SYNC) {
1446 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1447 tp->esp_period = 0;
1448 tp->esp_offset = 0;
1449 esp_setsync(esp, tp, 0, 0, 0, 0);
1450 scsi_esp_cmd(esp, ESP_CMD_RATN);
1451 return;
1452 }
1453
1454 esp->msg_out[0] = ABORT_TASK_SET;
1455 esp->msg_out_len = 1;
1456 scsi_esp_cmd(esp, ESP_CMD_SATN);
1457}
1458
1459static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1460{
1461 u8 period = esp->msg_in[3];
1462 u8 offset = esp->msg_in[4];
1463 u8 stp;
1464
1465 if (!(tp->flags & ESP_TGT_NEGO_SYNC))
1466 goto do_reject;
1467
1468 if (offset > 15)
1469 goto do_reject;
1470
1471 if (offset) {
1472		int one_clock;
1473
1474 if (period > esp->max_period) {
1475 period = offset = 0;
1476 goto do_sdtr;
1477 }
1478 if (period < esp->min_period)
1479 goto do_reject;
1480
1481 one_clock = esp->ccycle / 1000;
1482		stp = DIV_ROUND_UP(period << 2, one_clock);
1483 if (stp && esp->rev >= FAS236) {
1484 if (stp >= 50)
1485 stp--;
1486 }
1487 } else {
1488 stp = 0;
1489 }
1490
1491 esp_setsync(esp, tp, period, offset, stp, offset);
1492 return;
1493
1494do_reject:
1495 esp->msg_out[0] = MESSAGE_REJECT;
1496 esp->msg_out_len = 1;
1497 scsi_esp_cmd(esp, ESP_CMD_SATN);
1498 return;
1499
1500do_sdtr:
1501 tp->nego_goal_period = period;
1502 tp->nego_goal_offset = offset;
1503 esp->msg_out_len =
1504 spi_populate_sync_msg(&esp->msg_out[0],
1505 tp->nego_goal_period,
1506 tp->nego_goal_offset);
1507 scsi_esp_cmd(esp, ESP_CMD_SATN);
1508}
1509
1510static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1511{
1512 int size = 8 << esp->msg_in[3];
1513 u8 cfg3;
1514
1515 if (esp->rev != FASHME)
1516 goto do_reject;
1517
1518 if (size != 8 && size != 16)
1519 goto do_reject;
1520
1521 if (!(tp->flags & ESP_TGT_NEGO_WIDE))
1522 goto do_reject;
1523
1524 cfg3 = tp->esp_config3;
1525 if (size == 16) {
1526 tp->flags |= ESP_TGT_WIDE;
1527 cfg3 |= ESP_CONFIG3_EWIDE;
1528 } else {
1529 tp->flags &= ~ESP_TGT_WIDE;
1530 cfg3 &= ~ESP_CONFIG3_EWIDE;
1531 }
1532 tp->esp_config3 = cfg3;
1533 esp->prev_cfg3 = cfg3;
1534 esp_write8(cfg3, ESP_CFG3);
1535
1536 tp->flags &= ~ESP_TGT_NEGO_WIDE;
1537
1538 spi_period(tp->starget) = 0;
1539 spi_offset(tp->starget) = 0;
1540 if (!esp_need_to_nego_sync(tp)) {
1541 tp->flags &= ~ESP_TGT_CHECK_NEGO;
1542 scsi_esp_cmd(esp, ESP_CMD_RATN);
1543 } else {
1544 esp->msg_out_len =
1545 spi_populate_sync_msg(&esp->msg_out[0],
1546 tp->nego_goal_period,
1547 tp->nego_goal_offset);
1548 tp->flags |= ESP_TGT_NEGO_SYNC;
1549 scsi_esp_cmd(esp, ESP_CMD_SATN);
1550 }
1551 return;
1552
1553do_reject:
1554 esp->msg_out[0] = MESSAGE_REJECT;
1555 esp->msg_out_len = 1;
1556 scsi_esp_cmd(esp, ESP_CMD_SATN);
1557}
1558
1559static void esp_msgin_extended(struct esp *esp)
1560{
1561 struct esp_cmd_entry *ent = esp->active_cmd;
1562 struct scsi_cmnd *cmd = ent->cmd;
1563 struct esp_target_data *tp;
1564 int tgt = cmd->device->id;
1565
1566 tp = &esp->target[tgt];
1567 if (esp->msg_in[2] == EXTENDED_SDTR) {
1568 esp_msgin_sdtr(esp, tp);
1569 return;
1570 }
1571 if (esp->msg_in[2] == EXTENDED_WDTR) {
1572 esp_msgin_wdtr(esp, tp);
1573 return;
1574 }
1575
1576 shost_printk(KERN_INFO, esp->host,
1577 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1578
1579 esp->msg_out[0] = ABORT_TASK_SET;
1580 esp->msg_out_len = 1;
1581 scsi_esp_cmd(esp, ESP_CMD_SATN);
1582}
1583
1584/* Analyze msgin bytes received from target so far. Return non-zero
1585 * if there are more bytes needed to complete the message.
1586 */
1587static int esp_msgin_process(struct esp *esp)
1588{
1589 u8 msg0 = esp->msg_in[0];
1590 int len = esp->msg_in_len;
1591
1592 if (msg0 & 0x80) {
1593 /* Identify */
a1a75b35
HR
1594 shost_printk(KERN_INFO, esp->host,
1595 "Unexpected msgin identify\n");
cd9ad58d
DM
1596 return 0;
1597 }
1598
1599 switch (msg0) {
1600 case EXTENDED_MESSAGE:
1601 if (len == 1)
1602 return 1;
1603 if (len < esp->msg_in[1] + 2)
1604 return 1;
1605 esp_msgin_extended(esp);
1606 return 0;
1607
1608 case IGNORE_WIDE_RESIDUE: {
1609 struct esp_cmd_entry *ent;
1610 struct esp_cmd_priv *spriv;
1611 if (len == 1)
1612 return 1;
1613
1614 if (esp->msg_in[1] != 1)
1615 goto do_reject;
1616
1617 ent = esp->active_cmd;
1618 spriv = ESP_CMD_PRIV(ent->cmd);
1619
1620 if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1621 spriv->cur_sg--;
1622 spriv->cur_residue = 1;
1623 } else
1624 spriv->cur_residue++;
1625 spriv->tot_residue++;
1626 return 0;
1627 }
1628 case NOP:
1629 return 0;
1630 case RESTORE_POINTERS:
1631 esp_restore_pointers(esp, esp->active_cmd);
1632 return 0;
1633 case SAVE_POINTERS:
1634 esp_save_pointers(esp, esp->active_cmd);
1635 return 0;
1636
1637 case COMMAND_COMPLETE:
1638 case DISCONNECT: {
1639 struct esp_cmd_entry *ent = esp->active_cmd;
1640
1641 ent->message = msg0;
1642 esp_event(esp, ESP_EVENT_FREE_BUS);
1643 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1644 return 0;
1645 }
1646 case MESSAGE_REJECT:
1647 esp_msgin_reject(esp);
1648 return 0;
1649
1650 default:
1651 do_reject:
1652 esp->msg_out[0] = MESSAGE_REJECT;
1653 esp->msg_out_len = 1;
1654 scsi_esp_cmd(esp, ESP_CMD_SATN);
1655 return 0;
1656 }
1657}
1658
1659static int esp_process_event(struct esp *esp)
1660{
1661	int write, i;
1662
1663again:
1664 write = 0;
1665 esp_log_event("process event %d phase %x\n",
1666 esp->event, esp->sreg & ESP_STAT_PMASK);
1667 switch (esp->event) {
1668 case ESP_EVENT_CHECK_PHASE:
1669 switch (esp->sreg & ESP_STAT_PMASK) {
1670 case ESP_DOP:
1671 esp_event(esp, ESP_EVENT_DATA_OUT);
1672 break;
1673 case ESP_DIP:
1674 esp_event(esp, ESP_EVENT_DATA_IN);
1675 break;
1676 case ESP_STATP:
1677 esp_flush_fifo(esp);
1678 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1679 esp_event(esp, ESP_EVENT_STATUS);
1680 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1681 return 1;
1682
1683 case ESP_MOP:
1684 esp_event(esp, ESP_EVENT_MSGOUT);
1685 break;
1686
1687 case ESP_MIP:
1688 esp_event(esp, ESP_EVENT_MSGIN);
1689 break;
1690
1691 case ESP_CMDP:
1692 esp_event(esp, ESP_EVENT_CMD_START);
1693 break;
1694
1695 default:
1696 shost_printk(KERN_INFO, esp->host,
1697 "Unexpected phase, sreg=%02x\n",
1698 esp->sreg);
1699 esp_schedule_reset(esp);
1700 return 0;
1701 }
1702 goto again;
1703 break;
1704
1705 case ESP_EVENT_DATA_IN:
1706 write = 1;
1707 /* fallthru */
1708
1709 case ESP_EVENT_DATA_OUT: {
1710 struct esp_cmd_entry *ent = esp->active_cmd;
1711 struct scsi_cmnd *cmd = ent->cmd;
1712 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1713 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1714
1715 if (esp->rev == ESP100)
1716 scsi_esp_cmd(esp, ESP_CMD_NULL);
1717
1718 if (write)
1719 ent->flags |= ESP_CMD_FLAG_WRITE;
1720 else
1721 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1722
1723 if (esp->ops->dma_length_limit)
1724 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1725 dma_len);
1726 else
1727 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1728
1729 esp->data_dma_len = dma_len;
1730
1731 if (!dma_len) {
1732 shost_printk(KERN_ERR, esp->host,
1733 "DMA length is zero!\n");
1734 shost_printk(KERN_ERR, esp->host,
1735 "cur adr[%08llx] len[%08x]\n",
1736 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1737 esp_cur_dma_len(ent, cmd));
1738 esp_schedule_reset(esp);
1739 return 0;
1740 }
1741
1742		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1743				  (unsigned long long)dma_addr, dma_len, write);
1744
1745 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1746 write, ESP_CMD_DMA | ESP_CMD_TI);
1747 esp_event(esp, ESP_EVENT_DATA_DONE);
1748 break;
1749 }
1750 case ESP_EVENT_DATA_DONE: {
1751 struct esp_cmd_entry *ent = esp->active_cmd;
1752 struct scsi_cmnd *cmd = ent->cmd;
1753 int bytes_sent;
1754
1755 if (esp->ops->dma_error(esp)) {
1756 shost_printk(KERN_INFO, esp->host,
1757 "data done, DMA error, resetting\n");
1758 esp_schedule_reset(esp);
1759 return 0;
1760 }
1761
1762 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1763 /* XXX parity errors, etc. XXX */
1764
1765 esp->ops->dma_drain(esp);
1766 }
1767 esp->ops->dma_invalidate(esp);
1768
1769 if (esp->ireg != ESP_INTR_BSERV) {
1770 /* We should always see exactly a bus-service
1771 * interrupt at the end of a successful transfer.
1772 */
a1a75b35
HR
1773 shost_printk(KERN_INFO, esp->host,
1774 "data done, not BSERV, resetting\n");
cd9ad58d
DM
1775 esp_schedule_reset(esp);
1776 return 0;
1777 }
1778
1779 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1780
1781		esp_log_datadone("data done flgs[%x] sent[%d]\n",
1782 ent->flags, bytes_sent);
1783
1784 if (bytes_sent < 0) {
1785 /* XXX force sync mode for this target XXX */
1786 esp_schedule_reset(esp);
1787 return 0;
1788 }
1789
1790 esp_advance_dma(esp, ent, cmd, bytes_sent);
1791 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1792 goto again;
1793 }
1794
1795 case ESP_EVENT_STATUS: {
1796 struct esp_cmd_entry *ent = esp->active_cmd;
1797
1798 if (esp->ireg & ESP_INTR_FDONE) {
1799 ent->status = esp_read8(ESP_FDATA);
1800 ent->message = esp_read8(ESP_FDATA);
1801 scsi_esp_cmd(esp, ESP_CMD_MOK);
1802 } else if (esp->ireg == ESP_INTR_BSERV) {
1803 ent->status = esp_read8(ESP_FDATA);
1804 ent->message = 0xff;
1805 esp_event(esp, ESP_EVENT_MSGIN);
1806 return 0;
1807 }
1808
1809 if (ent->message != COMMAND_COMPLETE) {
1810 shost_printk(KERN_INFO, esp->host,
1811 "Unexpected message %x in status\n",
1812 ent->message);
1813 esp_schedule_reset(esp);
1814 return 0;
1815 }
1816
1817 esp_event(esp, ESP_EVENT_FREE_BUS);
1818 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1819 break;
1820 }
1821 case ESP_EVENT_FREE_BUS: {
1822 struct esp_cmd_entry *ent = esp->active_cmd;
1823 struct scsi_cmnd *cmd = ent->cmd;
1824
1825 if (ent->message == COMMAND_COMPLETE ||
1826 ent->message == DISCONNECT)
1827 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1828
1829 if (ent->message == COMMAND_COMPLETE) {
1830			esp_log_cmddone("Command done status[%x] message[%x]\n",
1831 ent->status, ent->message);
1832 if (ent->status == SAM_STAT_TASK_SET_FULL)
1833 esp_event_queue_full(esp, ent);
1834
1835 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1836 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1837 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1838 esp_autosense(esp, ent);
1839 } else {
1840 esp_cmd_is_done(esp, ent, cmd,
1841 compose_result(ent->status,
1842 ent->message,
1843 DID_OK));
1844 }
1845 } else if (ent->message == DISCONNECT) {
1846			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1847 cmd->device->id,
1848 ent->tag[0], ent->tag[1]);
1849
1850 esp->active_cmd = NULL;
1851 esp_maybe_execute_command(esp);
1852 } else {
1853 shost_printk(KERN_INFO, esp->host,
1854 "Unexpected message %x in freebus\n",
1855 ent->message);
1856 esp_schedule_reset(esp);
1857 return 0;
1858 }
1859 if (esp->active_cmd)
1860 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1861 break;
1862 }
1863 case ESP_EVENT_MSGOUT: {
1864 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1865
1866 if (esp_debug & ESP_DEBUG_MSGOUT) {
1867 int i;
1868 printk("ESP: Sending message [ ");
1869 for (i = 0; i < esp->msg_out_len; i++)
1870 printk("%02x ", esp->msg_out[i]);
1871 printk("]\n");
1872 }
1873
1874 if (esp->rev == FASHME) {
1875 int i;
1876
1877 /* Always use the fifo. */
1878 for (i = 0; i < esp->msg_out_len; i++) {
1879 esp_write8(esp->msg_out[i], ESP_FDATA);
1880 esp_write8(0, ESP_FDATA);
1881 }
1882 scsi_esp_cmd(esp, ESP_CMD_TI);
1883 } else {
1884 if (esp->msg_out_len == 1) {
1885 esp_write8(esp->msg_out[0], ESP_FDATA);
1886 scsi_esp_cmd(esp, ESP_CMD_TI);
1887 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1888 for (i = 0; i < esp->msg_out_len; i++)
1889 esp_write8(esp->msg_out[i], ESP_FDATA);
1890 scsi_esp_cmd(esp, ESP_CMD_TI);
1891 } else {
1892 /* Use DMA. */
1893 memcpy(esp->command_block,
1894 esp->msg_out,
1895 esp->msg_out_len);
1896
1897 esp->ops->send_dma_cmd(esp,
1898 esp->command_block_dma,
1899 esp->msg_out_len,
1900 esp->msg_out_len,
1901 0,
1902 ESP_CMD_DMA|ESP_CMD_TI);
1903 }
1904 }
1905 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1906 break;
1907 }
1908 case ESP_EVENT_MSGOUT_DONE:
1909 if (esp->rev == FASHME) {
1910 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1911 } else {
1912 if (esp->msg_out_len > 1)
1913 esp->ops->dma_invalidate(esp);
1914 }
1915
1916 if (!(esp->ireg & ESP_INTR_DC)) {
1917 if (esp->rev != FASHME)
1918 scsi_esp_cmd(esp, ESP_CMD_NULL);
1919 }
1920 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1921 goto again;
1922 case ESP_EVENT_MSGIN:
1923 if (esp->ireg & ESP_INTR_BSERV) {
1924 if (esp->rev == FASHME) {
1925 if (!(esp_read8(ESP_STATUS2) &
1926 ESP_STAT2_FEMPTY))
1927 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1928 } else {
1929 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1930 if (esp->rev == ESP100)
1931 scsi_esp_cmd(esp, ESP_CMD_NULL);
1932 }
1933 scsi_esp_cmd(esp, ESP_CMD_TI);
1934 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1935 return 1;
1936 }
1937 if (esp->ireg & ESP_INTR_FDONE) {
1938 u8 val;
1939
1940 if (esp->rev == FASHME)
1941 val = esp->fifo[0];
1942 else
1943 val = esp_read8(ESP_FDATA);
1944 esp->msg_in[esp->msg_in_len++] = val;
1945
1946			esp_log_msgin("Got msgin byte %x\n", val);
1947
1948 if (!esp_msgin_process(esp))
1949 esp->msg_in_len = 0;
1950
1951 if (esp->rev == FASHME)
1952 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1953
1954 scsi_esp_cmd(esp, ESP_CMD_MOK);
1955
1956 if (esp->event != ESP_EVENT_FREE_BUS)
1957 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1958 } else {
1959			shost_printk(KERN_INFO, esp->host,
1960				     "MSGIN neither BSERV nor FDONE, resetting\n");
1961 esp_schedule_reset(esp);
1962 return 0;
1963 }
1964 break;
1965 case ESP_EVENT_CMD_START:
1966 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1967 esp->cmd_bytes_left);
1968		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
1969 esp_event(esp, ESP_EVENT_CMD_DONE);
1970 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1971 break;
1972 case ESP_EVENT_CMD_DONE:
1973 esp->ops->dma_invalidate(esp);
1974 if (esp->ireg & ESP_INTR_BSERV) {
1975 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1976 goto again;
1977 }
1978 esp_schedule_reset(esp);
1979 return 0;
1980 break;
1981
1982 case ESP_EVENT_RESET:
1983 scsi_esp_cmd(esp, ESP_CMD_RS);
1984 break;
1985
1986 default:
1987 shost_printk(KERN_INFO, esp->host,
1988 "Unexpected event %x, resetting\n", esp->event);
1989 esp_schedule_reset(esp);
1990 return 0;
1991 break;
1992 }
1993 return 1;
1994}
1995
1996static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1997{
1998 struct scsi_cmnd *cmd = ent->cmd;
1999
2000 esp_unmap_dma(esp, cmd);
2001 esp_free_lun_tag(ent, cmd->device->hostdata);
2002 cmd->result = DID_RESET << 16;
2003
2004 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
2005 esp->ops->unmap_single(esp, ent->sense_dma,
2006 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2007 ent->sense_ptr = NULL;
2008 }
2009
2010 cmd->scsi_done(cmd);
2011 list_del(&ent->list);
2012 esp_put_ent(esp, ent);
2013}
2014
2015static void esp_clear_hold(struct scsi_device *dev, void *data)
2016{
2017 struct esp_lun_data *lp = dev->hostdata;
2018
2019 BUG_ON(lp->num_tagged);
2020 lp->hold = 0;
2021}
2022
2023static void esp_reset_cleanup(struct esp *esp)
2024{
2025 struct esp_cmd_entry *ent, *tmp;
2026 int i;
2027
2028 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2029 struct scsi_cmnd *cmd = ent->cmd;
2030
2031 list_del(&ent->list);
2032 cmd->result = DID_RESET << 16;
2033 cmd->scsi_done(cmd);
2034 esp_put_ent(esp, ent);
2035 }
2036
2037 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2038 if (ent == esp->active_cmd)
2039 esp->active_cmd = NULL;
2040 esp_reset_cleanup_one(esp, ent);
2041 }
2042
2043 BUG_ON(esp->active_cmd != NULL);
2044
2045 /* Force renegotiation of sync/wide transfers. */
2046 for (i = 0; i < ESP_MAX_TARGET; i++) {
2047 struct esp_target_data *tp = &esp->target[i];
2048
2049 tp->esp_period = 0;
2050 tp->esp_offset = 0;
2051 tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2052 ESP_CONFIG3_FSCSI |
2053 ESP_CONFIG3_FAST);
2054 tp->flags &= ~ESP_TGT_WIDE;
2055 tp->flags |= ESP_TGT_CHECK_NEGO;
2056
2057 if (tp->starget)
2058 __starget_for_each_device(tp->starget, NULL,
2059 esp_clear_hold);
2060	}
2061	esp->flags &= ~ESP_FLAG_RESETTING;
2062}
2063
2064/* Runs under host->lock */
2065static void __esp_interrupt(struct esp *esp)
2066{
2067 int finish_reset, intr_done;
2068 u8 phase;
2069
2070	/*
2071	 * Once INTRPT is read, STATUS and SSTEP are cleared.
2072	 */
2073	esp->sreg = esp_read8(ESP_STATUS);
2074 esp->seqreg = esp_read8(ESP_SSTEP);
2075 esp->ireg = esp_read8(ESP_INTRPT);
cd9ad58d
DM
2076
2077 if (esp->flags & ESP_FLAG_RESETTING) {
2078 finish_reset = 1;
2079 } else {
2080 if (esp_check_gross_error(esp))
2081 return;
2082
2083 finish_reset = esp_check_spur_intr(esp);
2084 if (finish_reset < 0)
2085 return;
2086 }
2087
cd9ad58d
DM
2088 if (esp->ireg & ESP_INTR_SR)
2089 finish_reset = 1;
2090
2091 if (finish_reset) {
2092 esp_reset_cleanup(esp);
2093 if (esp->eh_reset) {
2094 complete(esp->eh_reset);
2095 esp->eh_reset = NULL;
2096 }
2097 return;
2098 }
2099
2100 phase = (esp->sreg & ESP_STAT_PMASK);
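	/*
	 * On FASHME the fifo can still hold message/status bytes at this
	 * point; when no data phase or selection is in progress (or we
	 * have been reselected), drain it by hand so those bytes are not
	 * lost before the event machine runs.
	 */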
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
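		/*
		 * Events that should complete almost immediately set
		 * ESP_FLAG_QUICKIRQ_CHECK; for those we poll briefly for
		 * the follow-on interrupt below instead of returning and
		 * taking another hardware trap for it.
		 */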
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

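	/*
	 * Probe ladder: write a config register, then read it back.  A
	 * register that does not hold the written value is not implemented
	 * on this chip, which pins down the generation: no CFG2 means
	 * ESP100, CFG2 but no CFG3 means ESP100A, all three present means
	 * one of the ESP236/FAS variants.
	 */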
	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place. We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 * This is a representation of the input crystal clock frequency
	 * going into the ESP on this machine. Any operation whose timing
	 * is longer than 400ns depends on this value being correct. For
	 * example, you'll get blips for arbitration/selection during high
	 * load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 * The ESP isn't very bright and will arbitrate for the bus and try
	 * to select a target forever if you let it. This value tells the
	 * ESP when it has taken too long to negotiate and that it should
	 * interrupt the CPU so we can see what happened. The value is
	 * computed as follows (from NCR/Symbios chip docs).
	 *
	 *       (Time Out Period) * (Input Clock)
	 * STO = ----------------------------------
	 *       (8192) * (Clock Conversion Factor)
	 *
	 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 * This entails the smallest and largest sync period we could ever
	 * handle on this ESP.
	 */
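	/*
	 * Worked example (crystal frequencies assumed for illustration):
	 * a 40MHz clock gives ccf = ((40 + 4) / 5) = 8, so the 250ms
	 * timeout programs STO = (0.25 * 40000000) / (8192 * 8) ~= 153.
	 * A 20MHz clock gives ccf = 4 and the same ~153, since ccf tracks
	 * the clock; only a wrong ccf skews the timeout.
	 */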
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property. All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	else if (esp->num_tags >= ESP_MAX_TAG)
		esp->num_tags = ESP_MAX_TAG - 1;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
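
/*
 * A minimal sketch of the call sequence expected from a bus front-end
 * (the names below are hypothetical); ops, regs, dma_regs, cfreq,
 * scsi_id and the host structure must all be filled in first:
 *
 *	esp->ops = &my_esp_ops;
 *	esp->regs = my_ioremapped_registers;
 *	err = scsi_esp_register(esp, &pdev->dev);
 *	if (err)
 *		goto fail_unmap_regs;
 */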

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module = THIS_MODULE,
	.name = "esp",
	.info = esp_info,
	.queuecommand = esp_queuecommand,
	.target_alloc = esp_target_alloc,
	.target_destroy = esp_target_destroy,
	.slave_alloc = esp_slave_alloc,
	.slave_configure = esp_slave_configure,
	.slave_destroy = esp_slave_destroy,
	.eh_abort_handler = esp_eh_abort_handler,
	.eh_bus_reset_handler = esp_eh_bus_reset_handler,
	.eh_host_reset_handler = esp_eh_host_reset_handler,
	.can_queue = 7,
	.this_id = 7,
	.sg_tablesize = SG_ALL,
	.use_clustering = ENABLE_CLUSTERING,
	.max_sectors = 0xffff,
	.skip_settle_delay = 1,
	.use_blk_tags = 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset = esp_set_offset,
	.show_offset = 1,
	.set_period = esp_set_period,
	.show_period = 1,
	.set_width = esp_set_width,
	.show_width = 1,
	.get_signalling = esp_get_signalling,
};

static int __init esp_init(void)
{
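	/*
	 * esp_cmd_priv is carved out of the scsi_pointer area inside
	 * each scsi_cmnd; catch any growth past that boundary at
	 * compile time rather than corrupting commands at runtime.
	 */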
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
" 0x00000001 Log interrupt events\n"
" 0x00000002 Log scsi commands\n"
" 0x00000004 Log resets\n"
" 0x00000008 Log message in events\n"
" 0x00000010 Log message out events\n"
" 0x00000020 Log command completion\n"
" 0x00000040 Log disconnects\n"
" 0x00000080 Log data start\n"
" 0x00000100 Log data done\n"
" 0x00000200 Log reconnects\n"
" 0x00000400 Log auto-sense data\n"
" 0x00000800 Log driver events\n"
" 0x00001000 Log chip commands\n"
);
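
/*
 * The debug bits OR together; for example, loading the module with
 * esp_debug=0x9 (0x1 | 0x8) traces interrupt and message-in events
 * without any of the noisier data-phase logging.
 */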

module_init(esp_init);
module_exit(esp_exit);