esp_scsi: let DMA driver provide a config2 value
[deliverable/linux.git] / drivers / scsi / esp_scsi.c
CommitLineData
cd9ad58d
DM
1/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
e1f2a094 16#include <linux/irqreturn.h>
cd9ad58d
DM
17
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
#define DRV_MODULE_NAME "esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "2.000"
#define DRV_MODULE_RELDATE "April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

/* Debug category bitmask; each ESP_DEBUG_* bit enables the matching
 * esp_log_*() macro below.
 */
static u32 esp_debug;
#define ESP_DEBUG_INTR 0x00000001
#define ESP_DEBUG_SCSICMD 0x00000002
#define ESP_DEBUG_RESET 0x00000004
#define ESP_DEBUG_MSGIN 0x00000008
#define ESP_DEBUG_MSGOUT 0x00000010
#define ESP_DEBUG_CMDDONE 0x00000020
#define ESP_DEBUG_DISCONNECT 0x00000040
#define ESP_DEBUG_DATASTART 0x00000080
#define ESP_DEBUG_DATADONE 0x00000100
#define ESP_DEBUG_RECONNECT 0x00000200
#define ESP_DEBUG_AUTOSENSE 0x00000400
#define ESP_DEBUG_EVENT 0x00000800
#define ESP_DEBUG_COMMAND 0x00001000

/* Per-category conditional loggers.  Each expands to a no-op unless its
 * ESP_DEBUG_* bit is set in esp_debug; all expect an in-scope
 * `struct esp *esp`.
 */
#define esp_log_intr(f, a...) \
do { if (esp_debug & ESP_DEBUG_INTR) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do { if (esp_debug & ESP_DEBUG_RESET) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGIN) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGOUT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do { if (esp_debug & ESP_DEBUG_CMDDONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATASTART) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATADONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_RECONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do { if (esp_debug & ESP_DEBUG_EVENT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do { if (esp_debug & ESP_DEBUG_COMMAND) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

/* Chip register accessors, indirected through the bus glue's ops vector. */
#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
117
118static void esp_log_fill_regs(struct esp *esp,
119 struct esp_event_ent *p)
120{
121 p->sreg = esp->sreg;
122 p->seqreg = esp->seqreg;
123 p->sreg2 = esp->sreg2;
124 p->ireg = esp->ireg;
125 p->select_state = esp->select_state;
126 p->event = esp->event;
127}
128
129void scsi_esp_cmd(struct esp *esp, u8 val)
130{
131 struct esp_event_ent *p;
132 int idx = esp->esp_event_cur;
133
134 p = &esp->esp_event_log[idx];
135 p->type = ESP_EVENT_TYPE_CMD;
136 p->val = val;
137 esp_log_fill_regs(esp, p);
138
139 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
140
1af6f603 141 esp_log_command("cmd[%02x]\n", val);
cd9ad58d
DM
142 esp_write8(val, ESP_CMD);
143}
144EXPORT_SYMBOL(scsi_esp_cmd);
145
3170866f
HR
146static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
147{
148 if (esp->flags & ESP_FLAG_USE_FIFO) {
149 int i;
150
151 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
152 for (i = 0; i < len; i++)
153 esp_write8(esp->command_block[i], ESP_FDATA);
154 scsi_esp_cmd(esp, cmd);
155 } else {
156 if (esp->rev == FASHME)
157 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
158 cmd |= ESP_CMD_DMA;
159 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
160 len, max_len, 0, cmd);
161 }
162}
163
cd9ad58d
DM
164static void esp_event(struct esp *esp, u8 val)
165{
166 struct esp_event_ent *p;
167 int idx = esp->esp_event_cur;
168
169 p = &esp->esp_event_log[idx];
170 p->type = ESP_EVENT_TYPE_EVENT;
171 p->val = val;
172 esp_log_fill_regs(esp, p);
173
174 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
175
176 esp->event = val;
177}
178
179static void esp_dump_cmd_log(struct esp *esp)
180{
181 int idx = esp->esp_event_cur;
182 int stop = idx;
183
a1a75b35 184 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
cd9ad58d
DM
185 do {
186 struct esp_event_ent *p = &esp->esp_event_log[idx];
187
a1a75b35
HR
188 shost_printk(KERN_INFO, esp->host,
189 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
190 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
191 idx,
192 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
193 p->val, p->sreg, p->seqreg,
194 p->sreg2, p->ireg, p->select_state, p->event);
cd9ad58d
DM
195
196 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
197 } while (idx != stop);
198}
199
200static void esp_flush_fifo(struct esp *esp)
201{
202 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
203 if (esp->rev == ESP236) {
204 int lim = 1000;
205
206 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
207 if (--lim == 0) {
a1a75b35
HR
208 shost_printk(KERN_ALERT, esp->host,
209 "ESP_FF_BYTES will not clear!\n");
cd9ad58d
DM
210 break;
211 }
212 udelay(1);
213 }
214 }
215}
216
/* Drain the FASHME chip FIFO into esp->fifo[] and record the byte
 * count in esp->fifo_cnt.  Two bytes are read per flagged slot
 * (the HME FIFO appears to be 16 bits wide — behavior inferred from
 * this read pattern); ESP_STAT2_F1BYTE signals a trailing odd byte
 * that needs a dummy write to become readable.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		/* Push a dummy byte so the odd half-word can be read out. */
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
233
234static void esp_set_all_config3(struct esp *esp, u8 val)
235{
236 int i;
237
238 for (i = 0; i < ESP_MAX_TARGET; i++)
239 esp->target[i].esp_config3 = val;
240}
241
/* Reset the ESP chip, _not_ the SCSI bus.
 *
 * Resets the core, probes the exact chip revision (the ID register is
 * only readable right after reset on fast variants), then reprograms
 * every configuration register from the driver's cached values.
 */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	/* NOTE(review): periods are derived from ccycle; units assumed to
	 * be picoseconds-per-cycle scaled by 1000 — confirm in esp_scsi.h.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		/* Only the AM53c974 latches the glitch-eater bit. */
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		/* Reselect delay differs per variant and bus type. */
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
367
368static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
369{
370 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
4c2baaaf 371 struct scatterlist *sg = scsi_sglist(cmd);
cd9ad58d
DM
372 int dir = cmd->sc_data_direction;
373 int total, i;
374
375 if (dir == DMA_NONE)
376 return;
377
4c2baaaf 378 spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
cd9ad58d
DM
379 spriv->cur_residue = sg_dma_len(sg);
380 spriv->cur_sg = sg;
381
382 total = 0;
383 for (i = 0; i < spriv->u.num_sg; i++)
384 total += sg_dma_len(&sg[i]);
385 spriv->tot_residue = total;
386}
387
388static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
389 struct scsi_cmnd *cmd)
390{
391 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
392
393 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
394 return ent->sense_dma +
395 (ent->sense_ptr - cmd->sense_buffer);
396 }
397
398 return sg_dma_address(p->cur_sg) +
399 (sg_dma_len(p->cur_sg) -
400 p->cur_residue);
401}
402
403static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
404 struct scsi_cmnd *cmd)
405{
406 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
407
408 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
409 return SCSI_SENSE_BUFFERSIZE -
410 (ent->sense_ptr - cmd->sense_buffer);
411 }
412 return p->cur_residue;
413}
414
415static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
416 struct scsi_cmnd *cmd, unsigned int len)
417{
418 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
419
420 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
421 ent->sense_ptr += len;
422 return;
423 }
424
425 p->cur_residue -= len;
426 p->tot_residue -= len;
427 if (p->cur_residue < 0 || p->tot_residue < 0) {
a1a75b35
HR
428 shost_printk(KERN_ERR, esp->host,
429 "Data transfer overflow.\n");
430 shost_printk(KERN_ERR, esp->host,
431 "cur_residue[%d] tot_residue[%d] len[%u]\n",
432 p->cur_residue, p->tot_residue, len);
cd9ad58d
DM
433 p->cur_residue = 0;
434 p->tot_residue = 0;
435 }
436 if (!p->cur_residue && p->tot_residue) {
437 p->cur_sg++;
438 p->cur_residue = sg_dma_len(p->cur_sg);
439 }
440}
441
442static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
443{
444 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
445 int dir = cmd->sc_data_direction;
446
447 if (dir == DMA_NONE)
448 return;
449
4c2baaaf 450 esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
cd9ad58d
DM
451}
452
453static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
454{
455 struct scsi_cmnd *cmd = ent->cmd;
456 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
457
458 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
459 ent->saved_sense_ptr = ent->sense_ptr;
460 return;
461 }
462 ent->saved_cur_residue = spriv->cur_residue;
463 ent->saved_cur_sg = spriv->cur_sg;
464 ent->saved_tot_residue = spriv->tot_residue;
465}
466
467static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
468{
469 struct scsi_cmnd *cmd = ent->cmd;
470 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
471
472 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
473 ent->sense_ptr = ent->saved_sense_ptr;
474 return;
475 }
476 spriv->cur_residue = ent->saved_cur_residue;
477 spriv->cur_sg = ent->saved_cur_sg;
478 spriv->tot_residue = ent->saved_tot_residue;
479}
480
481static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
482{
483 if (cmd->cmd_len == 6 ||
484 cmd->cmd_len == 10 ||
485 cmd->cmd_len == 12) {
486 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
487 } else {
488 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
489 }
490}
491
492static void esp_write_tgt_config3(struct esp *esp, int tgt)
493{
494 if (esp->rev > ESP100A) {
495 u8 val = esp->target[tgt].esp_config3;
496
497 if (val != esp->prev_cfg3) {
498 esp->prev_cfg3 = val;
499 esp_write8(val, ESP_CFG3);
500 }
501 }
502}
503
504static void esp_write_tgt_sync(struct esp *esp, int tgt)
505{
506 u8 off = esp->target[tgt].esp_offset;
507 u8 per = esp->target[tgt].esp_period;
508
509 if (off != esp->prev_soff) {
510 esp->prev_soff = off;
511 esp_write8(off, ESP_SOFF);
512 }
513 if (per != esp->prev_stp) {
514 esp->prev_stp = per;
515 esp_write8(per, ESP_STP);
516 }
517}
518
519static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
520{
521 if (esp->rev == FASHME) {
522 /* Arbitrary segment boundaries, 24-bit counts. */
523 if (dma_len > (1U << 24))
524 dma_len = (1U << 24);
525 } else {
526 u32 base, end;
527
528 /* ESP chip limits other variants by 16-bits of transfer
529 * count. Actually on FAS100A and FAS236 we could get
530 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
531 * in the ESP_CFG2 register but that causes other unwanted
532 * changes so we don't use it currently.
533 */
534 if (dma_len > (1U << 16))
535 dma_len = (1U << 16);
536
537 /* All of the DMA variants hooked up to these chips
538 * cannot handle crossing a 24-bit address boundary.
539 */
540 base = dma_addr & ((1U << 24) - 1U);
541 end = base + dma_len;
542 if (end > (1U << 24))
543 end = (1U <<24);
544 dma_len = end - base;
545 }
546 return dma_len;
547}
548
549static int esp_need_to_nego_wide(struct esp_target_data *tp)
550{
551 struct scsi_target *target = tp->starget;
552
553 return spi_width(target) != tp->nego_goal_width;
554}
555
556static int esp_need_to_nego_sync(struct esp_target_data *tp)
557{
558 struct scsi_target *target = tp->starget;
559
560 /* When offset is zero, period is "don't care". */
561 if (!spi_offset(target) && !tp->nego_goal_offset)
562 return 0;
563
564 if (spi_offset(target) == tp->nego_goal_offset &&
565 spi_period(target) == tp->nego_goal_period)
566 return 0;
567
568 return 1;
569}
570
/* Claim the per-lun queue slot for this command.  Returns 0 when the
 * command may be issued now, -EBUSY when it must stay queued.  Untagged
 * commands "plug" the lun (lp->hold) until all tagged commands drain.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	/* orig_tag[1] holds the tag number; the slot must be free. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
615
616static void esp_free_lun_tag(struct esp_cmd_entry *ent,
617 struct esp_lun_data *lp)
618{
21af8107
DM
619 if (ent->orig_tag[0]) {
620 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
621 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
cd9ad58d
DM
622 lp->num_tagged--;
623 } else {
624 BUG_ON(lp->non_tagged_cmd != ent);
625 lp->non_tagged_cmd = NULL;
626 }
627}
628
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		/* Map the midlayer's sense buffer for the duration of the
		 * REQUEST_SENSE transfer; unmapped again at completion.
		 */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY plus a 6-byte REQUEST_SENSE CDB in the
	 * command block.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-2 devices want the lun repeated in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
687
/* Scan the issue queue for a command that can start now.  Tag message
 * bytes are prepared here; esp_alloc_lun_tag() decides whether the lun
 * can accept the command yet.  Returns NULL when nothing is startable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		/* Autosense always runs untagged and skips tag
		 * allocation entirely.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep a copy of the tag as issued for later slot
		 * bookkeeping (see esp_free_lun_tag).
		 */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
718
/* If the chip is idle, pick the next issuable command, build the
 * IDENTIFY/tag/CDB bytes in the command block, and start selection.
 * Called from queuecommand and command-completion paths.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Only one command may own the chip at a time. */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Only FASHME can do wide transfers here. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: IDENTIFY (+ tag) + CDB all in one block. */
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: CDB bytes fed later, message-out first. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag bytes to any negotiation
			 * message already staged in msg_out[].
			 */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
868
869static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
870{
871 struct list_head *head = &esp->esp_cmd_pool;
872 struct esp_cmd_entry *ret;
873
874 if (list_empty(head)) {
875 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
876 } else {
877 ret = list_entry(head->next, struct esp_cmd_entry, list);
878 list_del(&ret->list);
879 memset(ret, 0, sizeof(*ret));
880 }
881 return ret;
882}
883
/* Return a command entry to the host's free pool for reuse. */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
888
/* Complete a command: tear down its DMA mapping, release the lun tag
 * slot, report the result to the midlayer, recycle the entry, then try
 * to start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake any error-handler thread waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
939
/* Pack status (bits 0-7), message (bits 8-15) and driver code
 * (bits 16-23) into a single SCSI result word.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
945
946static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
947{
948 struct scsi_device *dev = ent->cmd->device;
949 struct esp_lun_data *lp = dev->hostdata;
950
951 scsi_track_queue_full(dev, lp->num_tagged - 1);
952}
953
f281233d 954static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
cd9ad58d
DM
955{
956 struct scsi_device *dev = cmd->device;
2b14ec78 957 struct esp *esp = shost_priv(dev->host);
cd9ad58d
DM
958 struct esp_cmd_priv *spriv;
959 struct esp_cmd_entry *ent;
960
961 ent = esp_get_ent(esp);
962 if (!ent)
963 return SCSI_MLQUEUE_HOST_BUSY;
964
965 ent->cmd = cmd;
966
967 cmd->scsi_done = done;
968
969 spriv = ESP_CMD_PRIV(cmd);
970 spriv->u.dma_addr = ~(dma_addr_t)0x0;
971
972 list_add_tail(&ent->list, &esp->queued_cmds);
973
974 esp_maybe_execute_command(esp);
975
976 return 0;
977}
978
/* Generate esp_queuecommand() as the standard locked wrapper around
 * esp_queuecommand_lck() (see DEF_SCSI_QCMD in <scsi/scsi_host.h>).
 */
static DEF_SCSI_QCMD(esp_queuecommand)
cd9ad58d
DM
981static int esp_check_gross_error(struct esp *esp)
982{
983 if (esp->sreg & ESP_STAT_SPAM) {
984 /* Gross Error, could be one of:
985 * - top of fifo overwritten
986 * - top of command register overwritten
987 * - DMA programmed with wrong direction
988 * - improper phase change
989 */
a1a75b35
HR
990 shost_printk(KERN_ERR, esp->host,
991 "Gross error sreg[%02x]\n", esp->sreg);
cd9ad58d
DM
992 /* XXX Reset the chip. XXX */
993 return 1;
994 }
995 return 0;
996}
997
/* Decide whether the interrupt we took is really ours.
 * Returns 0 when it looks genuine, 1 to ignore it (SCSI-reset special
 * case — presumably handled elsewhere, confirm against caller), and -1
 * on a fatal condition (spurious IRQ or DMA error).
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1034
/* Request a reset: only records it (sets ESP_FLAG_RESETTING and posts
 * ESP_EVENT_RESET); the actual reset runs when the event is processed.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1042
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the tagged command entry the target is resuming, or NULL on
 * any failure (caller schedules a reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait for the next chip interrupt. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* The target must be in message-in phase to deliver the tag. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (1us per iteration) for function-complete. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Tag message is two bytes: type, then the tag number. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1137
/* Handle a reselection: decode which target/lun is reconnecting, find
 * the matching outstanding command and resume it.  Returns 1 on
 * success, 0 after scheduling a bus reset.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one target bit may remain set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* Untagged command pending?  Otherwise fetch the tag bytes. */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* A pending abort for this command is delivered now. */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1228
1229static int esp_finish_select(struct esp *esp)
1230{
1231 struct esp_cmd_entry *ent;
1232 struct scsi_cmnd *cmd;
1233 u8 orig_select_state;
1234
1235 orig_select_state = esp->select_state;
1236
1237 /* No longer selecting. */
1238 esp->select_state = ESP_SELECT_NONE;
1239
1240 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1241 ent = esp->active_cmd;
1242 cmd = ent->cmd;
1243
1244 if (esp->ops->dma_error(esp)) {
1245 /* If we see a DMA error during or as a result of selection,
1246 * all bets are off.
1247 */
1248 esp_schedule_reset(esp);
1249 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1250 return 0;
1251 }
1252
1253 esp->ops->dma_invalidate(esp);
1254
1255 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1256 struct esp_target_data *tp = &esp->target[cmd->device->id];
1257
1258 /* Carefully back out of the selection attempt. Release
1259 * resources (such as DMA mapping & TAG) and reset state (such
1260 * as message out and command delivery variables).
1261 */
1262 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1263 esp_unmap_dma(esp, cmd);
1264 esp_free_lun_tag(ent, cmd->device->hostdata);
1265 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1266 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1267 esp->cmd_bytes_ptr = NULL;
1268 esp->cmd_bytes_left = 0;
1269 } else {
1270 esp->ops->unmap_single(esp, ent->sense_dma,
1271 SCSI_SENSE_BUFFERSIZE,
1272 DMA_FROM_DEVICE);
1273 ent->sense_ptr = NULL;
1274 }
1275
1276 /* Now that the state is unwound properly, put back onto
1277 * the issue queue. This command is no longer active.
1278 */
63ce2499 1279 list_move(&ent->list, &esp->queued_cmds);
cd9ad58d
DM
1280 esp->active_cmd = NULL;
1281
1282 /* Return value ignored by caller, it directly invokes
1283 * esp_reconnect().
1284 */
1285 return 0;
1286 }
1287
1288 if (esp->ireg == ESP_INTR_DC) {
1289 struct scsi_device *dev = cmd->device;
1290
1291 /* Disconnect. Make sure we re-negotiate sync and
1292 * wide parameters if this target starts responding
1293 * again in the future.
1294 */
1295 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1296
1297 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1298 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1299 return 1;
1300 }
1301
1302 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1303 /* Selection successful. On pre-FAST chips we have
1304 * to do a NOP and possibly clean out the FIFO.
1305 */
1306 if (esp->rev <= ESP236) {
1307 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1308
1309 scsi_esp_cmd(esp, ESP_CMD_NULL);
1310
1311 if (!fcnt &&
1312 (!esp->prev_soff ||
1313 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1314 esp_flush_fifo(esp);
1315 }
1316
1317 /* If we are doing a slow command, negotiation, etc.
1318 * we'll do the right thing as we transition to the
1319 * next phase.
1320 */
1321 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1322 return 0;
1323 }
1324
a1a75b35
HR
1325 shost_printk(KERN_INFO, esp->host,
1326 "Unexpected selection completion ireg[%x]\n", esp->ireg);
cd9ad58d
DM
1327 esp_schedule_reset(esp);
1328 return 0;
1329}
1330
/* Work out how many bytes of the current DMA transfer actually reached
 * (or came from) the target, using the chip's transfer counter and FIFO
 * flags.  Returns the byte count, or -1 when an ESP100 sync-transfer
 * hardware bug was detected and the caller should reset.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode every FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Residual transfer count; only valid while the terminal count
	 * bit is clear (a zero count with TCNT set means "all done").
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/*
	 * The am53c974 has a DMA 'peculiarity'.  The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'.  When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Stash the stray byte at the right position in either
		 * the sense buffer or the mapped data scatterlist.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still sitting in the FIFO never made it out. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1421
/* Commit a negotiated synchronous transfer agreement: record it in the
 * SPI transport attributes and the per-target state, and program the
 * chip's sync offset/period (and fast-SCSI config3 bit) registers.
 * An offset of 0 selects asynchronous transfers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			/* Fast-SCSI enable lives in a rev-dependent cfg3 bit. */
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Period < 200ns (value 50) means fast transfers. */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the programmed values so reselection can restore them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1460
/* React to a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, fall back and possibly start sync negotiation; if we
 * were negotiating sync, fall back to async.  Any other reject aborts
 * the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* WDTR rejected: give up on wide transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Wide failed, but sync is still worth trying. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* SDTR rejected: drop to asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Reject of anything else: abort the task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1501
/* Handle an incoming SDTR (synchronous data transfer request) message.
 * Validates the proposed period/offset against this chip's limits,
 * converts the SCSI period into an ESP STP register value, and commits
 * the agreement via esp_setsync().  Unacceptable proposals are either
 * countered with an async SDTR or rejected outright.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* An SDTR we did not initiate is not acceptable here. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Maximum REQ/ACK offset the ESP supports is 15. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter-propose async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the 4ns-unit SCSI period into input clocks. */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1552
/* Handle an incoming WDTR (wide data transfer request) message.  Only
 * the FASHME revision supports wide transfers here; accepted widths are
 * 8 or 16 bits.  On success the cfg3 wide-enable bit is programmed and
 * sync negotiation is started if still needed.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* A WDTR we did not initiate is not acceptable here. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		/* Wide settled; now negotiate synchronous parameters. */
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1601
1602static void esp_msgin_extended(struct esp *esp)
1603{
1604 struct esp_cmd_entry *ent = esp->active_cmd;
1605 struct scsi_cmnd *cmd = ent->cmd;
1606 struct esp_target_data *tp;
1607 int tgt = cmd->device->id;
1608
1609 tp = &esp->target[tgt];
1610 if (esp->msg_in[2] == EXTENDED_SDTR) {
1611 esp_msgin_sdtr(esp, tp);
1612 return;
1613 }
1614 if (esp->msg_in[2] == EXTENDED_WDTR) {
1615 esp_msgin_wdtr(esp, tp);
1616 return;
1617 }
1618
a1a75b35
HR
1619 shost_printk(KERN_INFO, esp->host,
1620 "Unexpected extended msg type %x\n", esp->msg_in[2]);
cd9ad58d
DM
1621
1622 esp->msg_out[0] = ABORT_TASK_SET;
1623 esp->msg_out_len = 1;
1624 scsi_esp_cmd(esp, ESP_CMD_SATN);
1625}
1626
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify messages are only valid at reconnect time,
		 * which is handled elsewhere - seeing one here is bogus.
		 */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need at least the length byte, then length+2 total
		 * (opcode, length, payload) before we can dispatch.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a residue count of 1 is meaningful on a 16-bit bus. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Step the data pointer back by one byte, crossing back
		 * into the previous scatterlist entry if necessary.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		/* Also reached via goto from IGNORE_WIDE_RESIDUE above. */
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1701
1702static int esp_process_event(struct esp *esp)
1703{
3170866f 1704 int write, i;
cd9ad58d
DM
1705
1706again:
1707 write = 0;
1af6f603
HR
1708 esp_log_event("process event %d phase %x\n",
1709 esp->event, esp->sreg & ESP_STAT_PMASK);
cd9ad58d
DM
1710 switch (esp->event) {
1711 case ESP_EVENT_CHECK_PHASE:
1712 switch (esp->sreg & ESP_STAT_PMASK) {
1713 case ESP_DOP:
1714 esp_event(esp, ESP_EVENT_DATA_OUT);
1715 break;
1716 case ESP_DIP:
1717 esp_event(esp, ESP_EVENT_DATA_IN);
1718 break;
1719 case ESP_STATP:
1720 esp_flush_fifo(esp);
1721 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1722 esp_event(esp, ESP_EVENT_STATUS);
1723 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1724 return 1;
1725
1726 case ESP_MOP:
1727 esp_event(esp, ESP_EVENT_MSGOUT);
1728 break;
1729
1730 case ESP_MIP:
1731 esp_event(esp, ESP_EVENT_MSGIN);
1732 break;
1733
1734 case ESP_CMDP:
1735 esp_event(esp, ESP_EVENT_CMD_START);
1736 break;
1737
1738 default:
a1a75b35
HR
1739 shost_printk(KERN_INFO, esp->host,
1740 "Unexpected phase, sreg=%02x\n",
1741 esp->sreg);
cd9ad58d
DM
1742 esp_schedule_reset(esp);
1743 return 0;
1744 }
1745 goto again;
1746 break;
1747
1748 case ESP_EVENT_DATA_IN:
1749 write = 1;
1750 /* fallthru */
1751
1752 case ESP_EVENT_DATA_OUT: {
1753 struct esp_cmd_entry *ent = esp->active_cmd;
1754 struct scsi_cmnd *cmd = ent->cmd;
1755 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1756 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1757
1758 if (esp->rev == ESP100)
1759 scsi_esp_cmd(esp, ESP_CMD_NULL);
1760
1761 if (write)
1762 ent->flags |= ESP_CMD_FLAG_WRITE;
1763 else
1764 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1765
6fe07aaf
FT
1766 if (esp->ops->dma_length_limit)
1767 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1768 dma_len);
1769 else
1770 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1771
cd9ad58d
DM
1772 esp->data_dma_len = dma_len;
1773
1774 if (!dma_len) {
a1a75b35
HR
1775 shost_printk(KERN_ERR, esp->host,
1776 "DMA length is zero!\n");
1777 shost_printk(KERN_ERR, esp->host,
1778 "cur adr[%08llx] len[%08x]\n",
1779 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1780 esp_cur_dma_len(ent, cmd));
cd9ad58d
DM
1781 esp_schedule_reset(esp);
1782 return 0;
1783 }
1784
a1a75b35 1785 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
e1f2a094 1786 (unsigned long long)dma_addr, dma_len, write);
cd9ad58d
DM
1787
1788 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1789 write, ESP_CMD_DMA | ESP_CMD_TI);
1790 esp_event(esp, ESP_EVENT_DATA_DONE);
1791 break;
1792 }
1793 case ESP_EVENT_DATA_DONE: {
1794 struct esp_cmd_entry *ent = esp->active_cmd;
1795 struct scsi_cmnd *cmd = ent->cmd;
1796 int bytes_sent;
1797
1798 if (esp->ops->dma_error(esp)) {
a1a75b35
HR
1799 shost_printk(KERN_INFO, esp->host,
1800 "data done, DMA error, resetting\n");
cd9ad58d
DM
1801 esp_schedule_reset(esp);
1802 return 0;
1803 }
1804
1805 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1806 /* XXX parity errors, etc. XXX */
1807
1808 esp->ops->dma_drain(esp);
1809 }
1810 esp->ops->dma_invalidate(esp);
1811
1812 if (esp->ireg != ESP_INTR_BSERV) {
1813 /* We should always see exactly a bus-service
1814 * interrupt at the end of a successful transfer.
1815 */
a1a75b35
HR
1816 shost_printk(KERN_INFO, esp->host,
1817 "data done, not BSERV, resetting\n");
cd9ad58d
DM
1818 esp_schedule_reset(esp);
1819 return 0;
1820 }
1821
1822 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1823
a1a75b35 1824 esp_log_datadone("data done flgs[%x] sent[%d]\n",
cd9ad58d
DM
1825 ent->flags, bytes_sent);
1826
1827 if (bytes_sent < 0) {
1828 /* XXX force sync mode for this target XXX */
1829 esp_schedule_reset(esp);
1830 return 0;
1831 }
1832
1833 esp_advance_dma(esp, ent, cmd, bytes_sent);
1834 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1835 goto again;
cd9ad58d
DM
1836 }
1837
1838 case ESP_EVENT_STATUS: {
1839 struct esp_cmd_entry *ent = esp->active_cmd;
1840
1841 if (esp->ireg & ESP_INTR_FDONE) {
1842 ent->status = esp_read8(ESP_FDATA);
1843 ent->message = esp_read8(ESP_FDATA);
1844 scsi_esp_cmd(esp, ESP_CMD_MOK);
1845 } else if (esp->ireg == ESP_INTR_BSERV) {
1846 ent->status = esp_read8(ESP_FDATA);
1847 ent->message = 0xff;
1848 esp_event(esp, ESP_EVENT_MSGIN);
1849 return 0;
1850 }
1851
1852 if (ent->message != COMMAND_COMPLETE) {
a1a75b35
HR
1853 shost_printk(KERN_INFO, esp->host,
1854 "Unexpected message %x in status\n",
1855 ent->message);
cd9ad58d
DM
1856 esp_schedule_reset(esp);
1857 return 0;
1858 }
1859
1860 esp_event(esp, ESP_EVENT_FREE_BUS);
1861 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1862 break;
1863 }
1864 case ESP_EVENT_FREE_BUS: {
1865 struct esp_cmd_entry *ent = esp->active_cmd;
1866 struct scsi_cmnd *cmd = ent->cmd;
1867
1868 if (ent->message == COMMAND_COMPLETE ||
1869 ent->message == DISCONNECT)
1870 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1871
1872 if (ent->message == COMMAND_COMPLETE) {
a1a75b35 1873 esp_log_cmddone("Command done status[%x] message[%x]\n",
cd9ad58d
DM
1874 ent->status, ent->message);
1875 if (ent->status == SAM_STAT_TASK_SET_FULL)
1876 esp_event_queue_full(esp, ent);
1877
1878 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1879 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1880 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1881 esp_autosense(esp, ent);
1882 } else {
1883 esp_cmd_is_done(esp, ent, cmd,
1884 compose_result(ent->status,
1885 ent->message,
1886 DID_OK));
1887 }
1888 } else if (ent->message == DISCONNECT) {
a1a75b35 1889 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
cd9ad58d
DM
1890 cmd->device->id,
1891 ent->tag[0], ent->tag[1]);
1892
1893 esp->active_cmd = NULL;
1894 esp_maybe_execute_command(esp);
1895 } else {
a1a75b35
HR
1896 shost_printk(KERN_INFO, esp->host,
1897 "Unexpected message %x in freebus\n",
1898 ent->message);
cd9ad58d
DM
1899 esp_schedule_reset(esp);
1900 return 0;
1901 }
1902 if (esp->active_cmd)
1903 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1904 break;
1905 }
1906 case ESP_EVENT_MSGOUT: {
1907 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1908
1909 if (esp_debug & ESP_DEBUG_MSGOUT) {
1910 int i;
1911 printk("ESP: Sending message [ ");
1912 for (i = 0; i < esp->msg_out_len; i++)
1913 printk("%02x ", esp->msg_out[i]);
1914 printk("]\n");
1915 }
1916
1917 if (esp->rev == FASHME) {
1918 int i;
1919
1920 /* Always use the fifo. */
1921 for (i = 0; i < esp->msg_out_len; i++) {
1922 esp_write8(esp->msg_out[i], ESP_FDATA);
1923 esp_write8(0, ESP_FDATA);
1924 }
1925 scsi_esp_cmd(esp, ESP_CMD_TI);
1926 } else {
1927 if (esp->msg_out_len == 1) {
1928 esp_write8(esp->msg_out[0], ESP_FDATA);
1929 scsi_esp_cmd(esp, ESP_CMD_TI);
3170866f
HR
1930 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1931 for (i = 0; i < esp->msg_out_len; i++)
1932 esp_write8(esp->msg_out[i], ESP_FDATA);
1933 scsi_esp_cmd(esp, ESP_CMD_TI);
cd9ad58d
DM
1934 } else {
1935 /* Use DMA. */
1936 memcpy(esp->command_block,
1937 esp->msg_out,
1938 esp->msg_out_len);
1939
1940 esp->ops->send_dma_cmd(esp,
1941 esp->command_block_dma,
1942 esp->msg_out_len,
1943 esp->msg_out_len,
1944 0,
1945 ESP_CMD_DMA|ESP_CMD_TI);
1946 }
1947 }
1948 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1949 break;
1950 }
1951 case ESP_EVENT_MSGOUT_DONE:
1952 if (esp->rev == FASHME) {
1953 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1954 } else {
1955 if (esp->msg_out_len > 1)
1956 esp->ops->dma_invalidate(esp);
1957 }
1958
1959 if (!(esp->ireg & ESP_INTR_DC)) {
1960 if (esp->rev != FASHME)
1961 scsi_esp_cmd(esp, ESP_CMD_NULL);
1962 }
1963 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1964 goto again;
1965 case ESP_EVENT_MSGIN:
1966 if (esp->ireg & ESP_INTR_BSERV) {
1967 if (esp->rev == FASHME) {
1968 if (!(esp_read8(ESP_STATUS2) &
1969 ESP_STAT2_FEMPTY))
1970 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1971 } else {
1972 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1973 if (esp->rev == ESP100)
1974 scsi_esp_cmd(esp, ESP_CMD_NULL);
1975 }
1976 scsi_esp_cmd(esp, ESP_CMD_TI);
1977 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1978 return 1;
1979 }
1980 if (esp->ireg & ESP_INTR_FDONE) {
1981 u8 val;
1982
1983 if (esp->rev == FASHME)
1984 val = esp->fifo[0];
1985 else
1986 val = esp_read8(ESP_FDATA);
1987 esp->msg_in[esp->msg_in_len++] = val;
1988
a1a75b35 1989 esp_log_msgin("Got msgin byte %x\n", val);
cd9ad58d
DM
1990
1991 if (!esp_msgin_process(esp))
1992 esp->msg_in_len = 0;
1993
1994 if (esp->rev == FASHME)
1995 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1996
1997 scsi_esp_cmd(esp, ESP_CMD_MOK);
1998
1999 if (esp->event != ESP_EVENT_FREE_BUS)
2000 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2001 } else {
a1a75b35
HR
2002 shost_printk(KERN_INFO, esp->host,
2003 "MSGIN neither BSERV not FDON, resetting");
cd9ad58d
DM
2004 esp_schedule_reset(esp);
2005 return 0;
2006 }
2007 break;
2008 case ESP_EVENT_CMD_START:
2009 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2010 esp->cmd_bytes_left);
3170866f 2011 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
cd9ad58d
DM
2012 esp_event(esp, ESP_EVENT_CMD_DONE);
2013 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2014 break;
2015 case ESP_EVENT_CMD_DONE:
2016 esp->ops->dma_invalidate(esp);
2017 if (esp->ireg & ESP_INTR_BSERV) {
2018 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2019 goto again;
2020 }
2021 esp_schedule_reset(esp);
2022 return 0;
2023 break;
2024
2025 case ESP_EVENT_RESET:
2026 scsi_esp_cmd(esp, ESP_CMD_RS);
2027 break;
2028
2029 default:
a1a75b35
HR
2030 shost_printk(KERN_INFO, esp->host,
2031 "Unexpected event %x, resetting\n", esp->event);
cd9ad58d
DM
2032 esp_schedule_reset(esp);
2033 return 0;
2034 break;
2035 }
2036 return 1;
2037}
2038
/* Fail one outstanding command with DID_RESET: release its DMA mapping
 * and tag, complete it back to the SCSI midlayer, and return its entry
 * to the command pool.  Called while tearing down after a bus reset.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* An autosense in flight also holds a sense-buffer mapping. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2057
2058static void esp_clear_hold(struct scsi_device *dev, void *data)
2059{
2060 struct esp_lun_data *lp = dev->hostdata;
2061
2062 BUG_ON(lp->num_tagged);
2063 lp->hold = 0;
2064}
2065
/* Tear down all driver state after a SCSI bus reset: fail every queued
 * and active command with DID_RESET, force renegotiation of sync/wide
 * parameters for every target, and clear the RESETTING flag.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never started can be completed directly. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active commands also need their DMA/tag state unwound. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2106
/* Core interrupt service routine.  Samples the chip's status/step/
 * interrupt registers, handles gross errors, bus resets and (re)selection
 * completion, then drives the event state machine until it is done.
 * Runs under host->lock.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative return means a spurious interrupt that was
		 * handled (or gave up) internally - nothing more to do.
		 */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* A bus-reset interrupt always forces the cleanup path. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	/* On FASHME, incoming bytes (e.g. reselection IDs) may be waiting
	 * in the FIFO; pull them out before they can be clobbered.
	 */
	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected while our own selection was pending:
			 * back out of it first, then handle the reconnect.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2184
/* Shared IRQ handler entry point for all ESP front-end drivers.
 * Takes the host lock, services the chip, and - when the event machine
 * set ESP_FLAG_QUICKIRQ_CHECK - briefly polls for a quickly following
 * interrupt to avoid the latency of a full IRQ round trip.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short bounded poll for the next interrupt. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2216
/* Determine the chip revision (ESP100 / ESP100A / ESP236 / fast
 * variants) by probing which config registers are implemented.  If the
 * DMA front-end already supplied a non-zero config2 value, the cfg2
 * probe is skipped and the chip is assumed to be at least ESP100A
 * class - NOTE(review): that assumption follows from the skip, confirm
 * against the front-end drivers.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		/* Write a known pattern to cfg2 and read it back; chips
		 * without cfg2 won't return it.
		 */
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	/* Same trick with cfg3 to separate ESP100A from the rest. */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2269
76246808 2270static void esp_init_swstate(struct esp *esp)
cd9ad58d
DM
2271{
2272 int i;
2273
2274 INIT_LIST_HEAD(&esp->queued_cmds);
2275 INIT_LIST_HEAD(&esp->active_cmds);
2276 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2277
2278 /* Start with a clear state, domain validation (via ->slave_configure,
2279 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2280 * commands.
2281 */
2282 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2283 esp->target[i].flags = 0;
2284 esp->target[i].nego_goal_period = 0;
2285 esp->target[i].nego_goal_offset = 0;
2286 esp->target[i].nego_goal_width = 0;
2287 esp->target[i].nego_goal_tags = 0;
2288 }
2289}
2290
/* This places the ESP into a known state at boot time.  Resets the DMA
 * engine and the chip, then issues a SCSI bus reset with the chip's
 * reset interrupt suppressed so boot does not see a spurious IRQ.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore cfg1 (re-enabling the reset interrupt). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2315
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) * (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Imperical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	/* CCF = ceil(clock-in-MHz / 5). */
	ccf = ((fhz / 1000000) + 4) / 5;
	/* A conversion factor of 1 is not usable; bump to the minimum of 2. */
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* NOTE: a cfact of 0 is how a CCF of 8 is encoded for the chip. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	/* Start out assuming slow sync; esp_get_revision() may raise this. */
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2376
/* Human-readable chip names, indexed by esp->rev.  The order must match
 * the revision enumeration in esp_scsi.h.
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};
2387
2388static struct scsi_transport_template *esp_transport_template;
2389
/* Complete chip-independent initialization and register the host with
 * the SCSI midlayer.  The bus glue driver is expected to have filled in
 * esp->host, esp->regs, esp->ops and esp->cfreq before calling this.
 * Returns 0 on success or the error from scsi_add_host().
 *
 * NOTE(review): the init ordering below is significant — clock params
 * must be known before probing the revision, and the chip is reset only
 * after software state is set up.
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	/* Clamp the tagged-queueing depth into a sane range. */
	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	else if (esp->num_tags >= ESP_MAX_TAG)
		esp->num_tags = ESP_MAX_TAG - 1;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only consume this instance number once registration succeeded. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2434
/* Counterpart to scsi_esp_register(): detach the host from the SCSI
 * midlayer.  The glue driver remains responsible for freeing esp itself.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2440
ec5e69f6
JB
2441static int esp_target_alloc(struct scsi_target *starget)
2442{
2443 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2444 struct esp_target_data *tp = &esp->target[starget->id];
2445
2446 tp->starget = starget;
2447
2448 return 0;
2449}
2450
2451static void esp_target_destroy(struct scsi_target *starget)
2452{
2453 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2454 struct esp_target_data *tp = &esp->target[starget->id];
2455
2456 tp->starget = NULL;
2457}
2458
cd9ad58d
DM
2459static int esp_slave_alloc(struct scsi_device *dev)
2460{
2b14ec78 2461 struct esp *esp = shost_priv(dev->host);
cd9ad58d
DM
2462 struct esp_target_data *tp = &esp->target[dev->id];
2463 struct esp_lun_data *lp;
2464
2465 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2466 if (!lp)
2467 return -ENOMEM;
2468 dev->hostdata = lp;
2469
cd9ad58d
DM
2470 spi_min_period(tp->starget) = esp->min_period;
2471 spi_max_offset(tp->starget) = 15;
2472
2473 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2474 spi_max_width(tp->starget) = 1;
2475 else
2476 spi_max_width(tp->starget) = 0;
2477
2478 return 0;
2479}
2480
2481static int esp_slave_configure(struct scsi_device *dev)
2482{
2b14ec78 2483 struct esp *esp = shost_priv(dev->host);
cd9ad58d 2484 struct esp_target_data *tp = &esp->target[dev->id];
cd9ad58d 2485
3707a186
HR
2486 if (dev->tagged_supported)
2487 scsi_change_queue_depth(dev, esp->num_tags);
cd9ad58d 2488
cd9ad58d
DM
2489 tp->flags |= ESP_TGT_DISCONNECT;
2490
2491 if (!spi_initial_dv(dev->sdev_target))
2492 spi_dv_device(dev);
2493
2494 return 0;
2495}
2496
/* Midlayer ->slave_destroy hook: free the per-LUN state allocated in
 * esp_slave_alloc().
 */
static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}
2504
/* Error-handling abort for a single command.  Three cases:
 *   1) still on queued_cmds  -> remove it and complete with DID_ABORT;
 *   2) currently active      -> send ABORT TASK SET via message-out and
 *                               wait for the interrupt path to complete us;
 *   3) disconnected          -> give up (FAILED) and let higher-level
 *                               recovery (bus/host reset) deal with it.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* NOTE(review): the lock is dropped and re-taken here, so the
	 * state dumped above may no longer be current — debugging aid only.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		/* The interrupt path will complete(eh_done) for us. */
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* Timed out: detach the completion under the lock so the
		 * interrupt path cannot touch the stack-allocated eh_done
		 * after we return.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2621
/* Error-handling SCSI bus reset: issue a chip bus-reset command and
 * wait for the interrupt path to signal eh_reset.  Returns SUCCESS if
 * the reset completed, FAILED on timeout.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* The interrupt path will complete(eh_reset) when it sees the
	 * reset finish.
	 */
	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* Timed out: detach the completion under the lock so the
		 * interrupt path cannot use the stale stack pointer.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2656
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	/* Full chip + bus reset, then fail back every outstanding command. */
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to recover from the bus reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2672
/* Midlayer ->info hook: just the driver name. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2677
/* Host template shared by all the ESP bus glue drivers; exported so
 * they can reference it when creating their Scsi_Host.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,	/* default initiator SCSI ID */
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The driver sleeps for esp_bus_reset_settle itself, so tell the
	 * midlayer not to add its own settle delay.
	 */
	.skip_settle_delay	= 1,
	.use_blk_tags		= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2700
2701static void esp_get_signalling(struct Scsi_Host *host)
2702{
2b14ec78 2703 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2704 enum spi_signal_type type;
2705
2706 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2707 type = SPI_SIGNAL_HVD;
2708 else
2709 type = SPI_SIGNAL_SE;
2710
2711 spi_signalling(host) = type;
2712}
2713
2714static void esp_set_offset(struct scsi_target *target, int offset)
2715{
2716 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2717 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2718 struct esp_target_data *tp = &esp->target[target->id];
2719
02507a80
FT
2720 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2721 tp->nego_goal_offset = 0;
2722 else
2723 tp->nego_goal_offset = offset;
cd9ad58d
DM
2724 tp->flags |= ESP_TGT_CHECK_NEGO;
2725}
2726
2727static void esp_set_period(struct scsi_target *target, int period)
2728{
2729 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2730 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2731 struct esp_target_data *tp = &esp->target[target->id];
2732
2733 tp->nego_goal_period = period;
2734 tp->flags |= ESP_TGT_CHECK_NEGO;
2735}
2736
2737static void esp_set_width(struct scsi_target *target, int width)
2738{
2739 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2b14ec78 2740 struct esp *esp = shost_priv(host);
cd9ad58d
DM
2741 struct esp_target_data *tp = &esp->target[target->id];
2742
2743 tp->nego_goal_width = (width ? 1 : 0);
2744 tp->flags |= ESP_TGT_CHECK_NEGO;
2745}
2746
/* SPI transport class callbacks: expose and set per-target negotiation
 * parameters (offset, period, width) and report bus signalling.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2756
static int __init esp_init(void)
{
	/* struct esp_cmd_priv is carried in the scsi_pointer area of each
	 * scsi_cmnd; make sure it actually fits at compile time.
	 */
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}
2768
static void __exit esp_exit(void)
{
	/* Drop the SPI transport reference taken in esp_init(). */
	spi_release_transport(esp_transport_template);
}
2773
2774MODULE_DESCRIPTION("ESP SCSI driver core");
2775MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2776MODULE_LICENSE("GPL");
2777MODULE_VERSION(DRV_VERSION);
2778
2779module_param(esp_bus_reset_settle, int, 0);
2780MODULE_PARM_DESC(esp_bus_reset_settle,
2781 "ESP scsi bus reset delay in seconds");
2782
2783module_param(esp_debug, int, 0);
2784MODULE_PARM_DESC(esp_debug,
2785"ESP bitmapped debugging message enable value:\n"
2786" 0x00000001 Log interrupt events\n"
2787" 0x00000002 Log scsi commands\n"
2788" 0x00000004 Log resets\n"
2789" 0x00000008 Log message in events\n"
2790" 0x00000010 Log message out events\n"
2791" 0x00000020 Log command completion\n"
2792" 0x00000040 Log disconnects\n"
2793" 0x00000080 Log data start\n"
2794" 0x00000100 Log data done\n"
2795" 0x00000200 Log reconnects\n"
2796" 0x00000400 Log auto-sense data\n"
2797);
2798
2799module_init(esp_init);
2800module_exit(esp_exit);
This page took 0.725451 seconds and 5 git commands to generate.