/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.ioctl		= adpt_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

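/*
 * Read the firmware "blink LED" fault code. The 0xbc flag byte checked
 * below is taken to mean the firmware has latched a fault and the
 * adjacent register holds the blink code; the exact semantics are
 * firmware-specific.
 */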
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba* pHba;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit),
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
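	// A minimal sketch of the userspace side (hypothetical node name;
	// the major number is whatever DPTI_I2O_MAJOR resolves to, visible
	// in /proc/devices after this registration):
	//   mknod /dev/dpti0 c <DPTI_I2O_MAJOR> 0
	// or a udev rule keyed on the dpt_i2o class created above.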
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

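	/*
	 * Build the single SGL element. The 0xD0000000 flag/count word
	 * below appears to combine the I2O simple-SGE "last element" and
	 * "end of buffer" flags with the byte count in the low 24 bits
	 * (inferred from the driver's usage, not quoted from the spec).
	 */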
	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}

/*
 * Go from a u32 'context' to a struct scsi_cmnd * .
 * This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 * Turn a pointer to ioctl reply data into an u32 'context'
 */
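/*
 * On 32-bit builds the pointer itself fits in the u32 context. On
 * 64-bit builds it cannot, so we instead hand out an index into the
 * per-HBA ioctl_reply_context[] table.
 */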
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

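/*
 * Abort a single outstanding command: send I2O_CMD_SCSI_ABORT to the
 * device's TID with the command's 32-bit context so the firmware can
 * match the request to abort.
 */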
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_32BIT_MASK) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_32BIT_MASK);

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);

	// device_destroy() uses pHba->unit, so do it before freeing pHba
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}

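/*
 * Synchronous message post: tag the message with a 15-bit id (the high
 * bit of the context marks it as a post-wait message), chain a wait
 * node onto adpt_post_wait_queue, post the frame, and sleep until
 * adpt_i2o_post_wait_complete() matches the id and wakes us, or the
 * timeout expires.
 */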
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}

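/*
 * Hand one message to the IOP: read the inbound post port to obtain a
 * free message-frame offset (EMPTY_QUEUE means none is available yet),
 * copy the message into that frame, then write the offset back to the
 * post port to give the frame to the firmware.
 */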
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding. If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

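/*
 * IOP reset: grab a message frame, send I2O_CMD_ADAPTER_RESET with a
 * 4-byte DMA status buffer, then poll that buffer until the firmware
 * writes a result (0x01 = reset in progress, 0x02 = reset rejected).
 */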
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}

1538{
1539 int i;
1540 int max;
1541 int tid;
1542 struct i2o_device *d;
1543 i2o_lct *lct = pHba->lct;
1544 u8 bus_no = 0;
1545 s16 scsi_id;
1546 s16 scsi_lun;
1547 u32 buf[10]; // larger than 7, or 8 ...
1548 struct adpt_device* pDev;
1549
1550 if (lct == NULL) {
1551 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1552 return -1;
1553 }
1554
1555 max = lct->table_size;
1556 max -= 3;
1557 max /= 9;
1558
1559 for(i=0;i<max;i++) {
1560 if( lct->lct_entry[i].user_tid != 0xfff){
1561 /*
1562 * If we have hidden devices, we need to inform the upper layers about
1563 * the possible maximum id reference to handle device access when
1564 * an array is disassembled. This code has no other purpose but to
1565 * allow us future access to devices that are currently hidden
1566 * behind arrays, hotspares or have not been configured (JBOD mode).
1567 */
1568 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1569 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1570 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1571 continue;
1572 }
1573 tid = lct->lct_entry[i].tid;
1574 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1575 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1576 continue;
1577 }
1578 bus_no = buf[0]>>16;
1579 scsi_id = buf[1];
1580 scsi_lun = (buf[2]>>8 )&0xff;
1581 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1582 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1583 continue;
1584 }
1585 if (scsi_id >= MAX_ID){
1586 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1587 continue;
1588 }
1589 if(bus_no > pHba->top_scsi_channel){
1590 pHba->top_scsi_channel = bus_no;
1591 }
1592 if(scsi_id > pHba->top_scsi_id){
1593 pHba->top_scsi_id = scsi_id;
1594 }
1595 if(scsi_lun > pHba->top_scsi_lun){
1596 pHba->top_scsi_lun = scsi_lun;
1597 }
1598 continue;
1599 }
5cbded58 1600 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1da177e4
LT
1601 if(d==NULL)
1602 {
1603 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1604 return -ENOMEM;
1605 }
1606
1c2fb3f3 1607 d->controller = pHba;
1da177e4
LT
1608 d->next = NULL;
1609
1610 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1611
1612 d->flags = 0;
1613 tid = d->lct_data.tid;
1614 adpt_i2o_report_hba_unit(pHba, d);
1615 adpt_i2o_install_device(pHba, d);
1616 }
1617 bus_no = 0;
1618 for(d = pHba->devices; d ; d = d->next) {
1619 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1620 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1621 tid = d->lct_data.tid;
1622 // TODO get the bus_no from hrt-but for now they are in order
1623 //bus_no =
1624 if(bus_no > pHba->top_scsi_channel){
1625 pHba->top_scsi_channel = bus_no;
1626 }
1627 pHba->channel[bus_no].type = d->lct_data.class_id;
1628 pHba->channel[bus_no].tid = tid;
1629 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1630 {
1631 pHba->channel[bus_no].scsi_id = buf[1];
1632 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1633 }
1634 // TODO remove - this is just until we get from hrt
1635 bus_no++;
1636 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1637 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1638 break;
1639 }
1640 }
1641 }
1642
1643 // Setup adpt_device table
1644 for(d = pHba->devices; d ; d = d->next) {
1645 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1646 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1647 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1648
1649 tid = d->lct_data.tid;
1650 scsi_id = -1;
1651 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1652 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1653 bus_no = buf[0]>>16;
1654 scsi_id = buf[1];
1655 scsi_lun = (buf[2]>>8 )&0xff;
1656 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1657 continue;
1658 }
1659 if (scsi_id >= MAX_ID) {
1660 continue;
1661 }
1662 if( pHba->channel[bus_no].device[scsi_id] == NULL){
ab552204 1663 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1da177e4
LT
1664 if(pDev == NULL) {
1665 return -ENOMEM;
1666 }
1667 pHba->channel[bus_no].device[scsi_id] = pDev;
1da177e4
LT
1668 } else {
1669 for( pDev = pHba->channel[bus_no].device[scsi_id];
1670 pDev->next_lun; pDev = pDev->next_lun){
1671 }
ab552204 1672 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1da177e4
LT
1673 if(pDev->next_lun == NULL) {
1674 return -ENOMEM;
1675 }
1da177e4
LT
1676 pDev = pDev->next_lun;
1677 }
1678 pDev->tid = tid;
1679 pDev->scsi_channel = bus_no;
1680 pDev->scsi_id = scsi_id;
1681 pDev->scsi_lun = scsi_lun;
1682 pDev->pI2o_dev = d;
1683 d->owner = pDev;
1684 pDev->type = (buf[0])&0xff;
1685 pDev->flags = (buf[0]>>8)&0xff;
1686 if(scsi_id > pHba->top_scsi_id){
1687 pHba->top_scsi_id = scsi_id;
1688 }
1689 if(scsi_lun > pHba->top_scsi_lun){
1690 pHba->top_scsi_lun = scsi_lun;
1691 }
1692 }
1693 if(scsi_id == -1){
1694 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1695 d->lct_data.identity_tag);
1696 }
1697 }
1698 }
1699 return 0;
1700}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}

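/*
 * Pass a raw I2O message from userspace through to the IOP. The caller
 * hands us the message frame (with its size in the header) followed by
 * the expected reply frame; any simple SG elements in the message are
 * rewritten to point at kernel bounce buffers that are copied in and
 * out around the command.
 */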
1784static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1785{
1786 u32 msg[MAX_MESSAGE_SIZE];
1787 u32* reply = NULL;
1788 u32 size = 0;
1789 u32 reply_size = 0;
1790 u32 __user *user_msg = arg;
1791 u32 __user * user_reply = NULL;
1792 void *sg_list[pHba->sg_tablesize];
1793 u32 sg_offset = 0;
1794 u32 sg_count = 0;
1795 int sg_index = 0;
1796 u32 i = 0;
1797 u32 rcode = 0;
1798 void *p = NULL;
67af2b06 1799 dma_addr_t addr;
1da177e4
LT
1800 ulong flags = 0;
1801
1802 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1803 // get user msg size in u32s
1804 if(get_user(size, &user_msg[0])){
1805 return -EFAULT;
1806 }
1807 size = size>>16;
1808
1809 user_reply = &user_msg[size];
1810 if(size > MAX_MESSAGE_SIZE){
1811 return -EFAULT;
1812 }
1813 size *= 4; // Convert to bytes
1814
1815 /* Copy in the user's I2O command */
1816 if(copy_from_user(msg, user_msg, size)) {
1817 return -EFAULT;
1818 }
1819 get_user(reply_size, &user_reply[0]);
1820 reply_size = reply_size>>16;
1821 if(reply_size > REPLY_FRAME_SIZE){
1822 reply_size = REPLY_FRAME_SIZE;
1823 }
1824 reply_size *= 4;
ab552204 1825 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1da177e4
LT
1826 if(reply == NULL) {
1827 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1828 return -ENOMEM;
1829 }
1da177e4
LT
1830 sg_offset = (msg[0]>>4)&0xf;
1831 msg[2] = 0x40000000; // IOCTL context
62ac5aed
MS
1832 msg[3] = adpt_ioctl_to_context(pHba, reply);
1833 if (msg[3] == (u32)-1)
1834 return -EBUSY;
1835
1da177e4
LT
1836 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1837 if(sg_offset) {
62ac5aed 1838 // TODO add 64 bit API
1da177e4
LT
1839 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1840 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1841 if (sg_count > pHba->sg_tablesize){
1842 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1843 kfree (reply);
1844 return -EINVAL;
1845 }
1846
1847 for(i = 0; i < sg_count; i++) {
1848 int sg_size;
1849
1850 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1851 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1852 rcode = -EINVAL;
1853 goto cleanup;
1854 }
1855 sg_size = sg[i].flag_count & 0xffffff;
1856 /* Allocate memory for the transfer */
67af2b06 1857 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1da177e4
LT
1858 if(!p) {
1859 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1860 pHba->name,sg_size,i,sg_count);
1861 rcode = -ENOMEM;
1862 goto cleanup;
1863 }
1864 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1865 /* Copy in the user's SG buffer if necessary */
1866 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
62ac5aed
MS
1867 // sg_simple_element API is 32 bit
1868 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1da177e4
LT
1869 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1870 rcode = -EFAULT;
1871 goto cleanup;
1872 }
1873 }
62ac5aed
MS
1874 /* sg_simple_element API is 32 bit, but addr < 4GB */
1875 sg[i].addr_bus = addr;
1da177e4
LT
1876 }
1877 }
1878
1879 do {
1880 if(pHba->host)
1881 spin_lock_irqsave(pHba->host->host_lock, flags);
1882		// This state stops any new commands from entering the
1883 // controller while processing the ioctl
1884// pHba->state |= DPTI_STATE_IOCTL;
1885// We can't set this now - The scsi subsystem sets host_blocked and
1886// the queue empties and stops. We need a way to restart the queue
1887 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1888 if (rcode != 0)
1889 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1890 rcode, reply);
1891// pHba->state &= ~DPTI_STATE_IOCTL;
1892 if(pHba->host)
1893 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1894 } while(rcode == -ETIMEDOUT);
1895
1896 if(rcode){
1897 goto cleanup;
1898 }
1899
1900 if(sg_offset) {
1901		/* Copy the Scatter Gather buffers back to user space */
1902 u32 j;
1903		// TODO add 64 bit API
1904 struct sg_simple_element* sg;
1905 int sg_size;
1906
1907		// re-acquire the original message to correctly handle the sg copy operation
1908 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1909 // get user msg size in u32s
1910 if(get_user(size, &user_msg[0])){
1911 rcode = -EFAULT;
1912 goto cleanup;
1913 }
1914 size = size>>16;
1915 size *= 4;
1916 /* Copy in the user's I2O command */
1917 if (copy_from_user (msg, user_msg, size)) {
1918 rcode = -EFAULT;
1919 goto cleanup;
1920 }
1921 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1922
1923		// TODO add 64 bit API
1924 sg = (struct sg_simple_element*)(msg + sg_offset);
1925 for (j = 0; j < sg_count; j++) {
1926 /* Copy out the SG list to user's buffer if necessary */
1927 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1928 sg_size = sg[j].flag_count & 0xffffff;
1929 // sg_simple_element API is 32 bit
1930 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1931 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1932 rcode = -EFAULT;
1933 goto cleanup;
1934 }
1935 }
1936 }
1937 }
1938
1939 /* Copy back the reply to user space */
1940 if (reply_size) {
1941 // we wrote our own values for context - now restore the user supplied ones
1942 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1943 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1944 rcode = -EFAULT;
1945 }
1946 if(copy_to_user(user_reply, reply, reply_size)) {
1947 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1948 rcode = -EFAULT;
1949 }
1950 }
1951
1952
1953cleanup:
1954 if (rcode != -ETIME && rcode != -EINTR) {
1955 struct sg_simple_element *sg =
1956 (struct sg_simple_element*) (msg +sg_offset);
1957		kfree (reply);
1958 while(sg_index) {
1959 if(sg_list[--sg_index]) {
1960 dma_free_coherent(&pHba->pDev->dev,
1961 sg[sg_index].flag_count & 0xffffff,
1962 sg_list[sg_index],
1963 sg[sg_index].addr_bus);
1964 }
1965 }
1966 }
1967 return rcode;
1968}
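/*
 * A minimal user-space sketch of the passthru contract implemented
 * above. Illustrative only: the device node name is an assumption, and
 * SGL_OFFSET_0 stands in for the value from the driver's I2O headers.
 *
 *	u32 frame[32];
 *	memset(frame, 0, sizeof(frame));
 *	frame[0] = (10 << 16) | SGL_OFFSET_0;	// size in u32s in the top 16 bits
 *	// ... fill in the remaining I2O message words here ...
 *	int fd = open("/dev/dpti0", O_RDWR);	// assumed node name
 *	if (fd >= 0 && ioctl(fd, I2OUSRCMD, frame) < 0)
 *		perror("I2OUSRCMD");
 *
 * The driver reads the frame size from the upper 16 bits of word 0 and
 * expects the caller's reply buffer to follow the message in the same
 * user allocation (user_reply = &user_msg[size]).
 */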
1969
1970
1971/*
1972 * This routine returns information about the system. This does not affect
1973 * any logic and if the info is wrong - it doesn't matter.
1974 */
1975
1976/* Get all the info we can not get from kernel services */
1977static int adpt_system_info(void __user *buffer)
1978{
1979 sysInfo_S si;
1980
1981 memset(&si, 0, sizeof(si));
1982
1983 si.osType = OS_LINUX;
1984 si.osMajorVersion = 0;
1985 si.osMinorVersion = 0;
1986 si.osRevision = 0;
1987 si.busType = SI_PCI_BUS;
1988 si.processorFamily = DPTI_sig.dsProcessorFamily;
1989
1990#if defined __i386__
1991 adpt_i386_info(&si);
1992#elif defined (__ia64__)
1993 adpt_ia64_info(&si);
1994#elif defined(__sparc__)
1995 adpt_sparc_info(&si);
1996#elif defined (__alpha__)
1997 adpt_alpha_info(&si);
1998#else
1999 si.processorType = 0xff ;
2000#endif
2001 if(copy_to_user(buffer, &si, sizeof(si))){
2002 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2003 return -EFAULT;
2004 }
2005
2006 return 0;
2007}
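/*
 * The per-architecture helpers below fill in only si->processorType;
 * every other sysInfo_S field is either constant or left zeroed by
 * adpt_system_info() above.
 */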
2008
2009#if defined __ia64__
2010static void adpt_ia64_info(sysInfo_S* si)
2011{
2012 // This is all the info we need for now
2013 // We will add more info as our new
2014	// management utility requires it
2015 si->processorType = PROC_IA64;
2016}
2017#endif
2018
2019
2020#if defined __sparc__
2021static void adpt_sparc_info(sysInfo_S* si)
2022{
2023 // This is all the info we need for now
2024 // We will add more info as our new
2025	// management utility requires it
2026 si->processorType = PROC_ULTRASPARC;
2027}
2028#endif
2029
2030#if defined __alpha__
2031static void adpt_alpha_info(sysInfo_S* si)
2032{
2033 // This is all the info we need for now
2034 // We will add more info as our new
2035	// management utility requires it
2036 si->processorType = PROC_ALPHA;
2037}
2038#endif
2039
2040#if defined __i386__
2041
2042static void adpt_i386_info(sysInfo_S* si)
2043{
2044 // This is all the info we need for now
2045 // We will add more info as our new
2046	// management utility requires it
2047 switch (boot_cpu_data.x86) {
2048 case CPU_386:
2049 si->processorType = PROC_386;
2050 break;
2051 case CPU_486:
2052 si->processorType = PROC_486;
2053 break;
2054 case CPU_586:
2055 si->processorType = PROC_PENTIUM;
2056 break;
2057 default: // Just in case
2058 si->processorType = PROC_PENTIUM;
2059 break;
2060 }
2061}
2062
2063#endif
2064
2065
2066static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2067 ulong arg)
2068{
2069 int minor;
2070 int error = 0;
2071 adpt_hba* pHba;
2072 ulong flags = 0;
2073 void __user *argp = (void __user *)arg;
2074
2075 minor = iminor(inode);
2076 if (minor >= DPTI_MAX_HBA){
2077 return -ENXIO;
2078 }
2079	mutex_lock(&adpt_configuration_lock);
2080 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2081 if (pHba->unit == minor) {
2082 break; /* found adapter */
2083 }
2084 }
2085	mutex_unlock(&adpt_configuration_lock);
2086 if(pHba == NULL){
2087 return -ENXIO;
2088 }
2089
2090 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2091 schedule_timeout_uninterruptible(2);
2092
2093 switch (cmd) {
2094 // TODO: handle 3 cases
2095 case DPT_SIGNATURE:
2096 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2097 return -EFAULT;
2098 }
2099 break;
2100 case I2OUSRCMD:
2101 return adpt_i2o_passthru(pHba, argp);
2102
2103 case DPT_CTRLINFO:{
2104 drvrHBAinfo_S HbaInfo;
2105
2106#define FLG_OSD_PCI_VALID 0x0001
2107#define FLG_OSD_DMA 0x0002
2108#define FLG_OSD_I2O 0x0004
2109 memset(&HbaInfo, 0, sizeof(HbaInfo));
2110 HbaInfo.drvrHBAnum = pHba->unit;
2111 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2112 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2113 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2114 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2115 HbaInfo.Interrupt = pHba->pDev->irq;
2116 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2117 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2118 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2119 return -EFAULT;
2120 }
2121 break;
2122 }
2123 case DPT_SYSINFO:
2124 return adpt_system_info(argp);
2125 case DPT_BLINKLED:{
2126 u32 value;
2127 value = (u32)adpt_read_blink_led(pHba);
2128 if (copy_to_user(argp, &value, sizeof(value))) {
2129 return -EFAULT;
2130 }
2131 break;
2132 }
2133 case I2ORESETCMD:
2134 if(pHba->host)
2135 spin_lock_irqsave(pHba->host->host_lock, flags);
2136 adpt_hba_reset(pHba);
2137 if(pHba->host)
2138 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2139 break;
2140 case I2ORESCANCMD:
2141 adpt_rescan(pHba);
2142 break;
2143 default:
2144 return -EINVAL;
2145 }
2146
2147 return error;
2148}
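/*
 * Note that I2OUSRCMD and DPT_SYSINFO return straight from their helper
 * functions above; only the remaining cases fall through to the common
 * "return error" at the end of adpt_ioctl().
 */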
2149
2150#ifdef CONFIG_COMPAT
2151static long compat_adpt_ioctl(struct file *file,
2152 unsigned int cmd, unsigned long arg)
2153{
2154 struct inode *inode;
2155 long ret;
2156
2157 inode = file->f_dentry->d_inode;
2158
2159 lock_kernel();
2160
2161 switch(cmd) {
2162 case DPT_SIGNATURE:
2163 case I2OUSRCMD:
2164 case DPT_CTRLINFO:
2165 case DPT_SYSINFO:
2166 case DPT_BLINKLED:
2167 case I2ORESETCMD:
2168 case I2ORESCANCMD:
2169 case (DPT_TARGET_BUSY & 0xFFFF):
2170 case DPT_TARGET_BUSY:
2171 ret = adpt_ioctl(inode, file, cmd, arg);
2172 break;
2173 default:
2174 ret = -ENOIOCTLCMD;
2175 }
2176
2177 unlock_kernel();
2178
2179 return ret;
2180}
2181#endif
2182
2183static irqreturn_t adpt_isr(int irq, void *dev_id)
2184{
2185 struct scsi_cmnd* cmd;
2186 adpt_hba* pHba = dev_id;
2187 u32 m;
2188	void __iomem *reply;
2189 u32 status=0;
2190 u32 context;
2191 ulong flags = 0;
2192 int handled = 0;
2193
2194 if (pHba == NULL){
2195 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2196 return IRQ_NONE;
2197 }
2198 if(pHba->host)
2199 spin_lock_irqsave(pHba->host->host_lock, flags);
2200
2201 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2202 m = readl(pHba->reply_port);
2203 if(m == EMPTY_QUEUE){
2204 // Try twice then give up
2205 rmb();
2206 m = readl(pHba->reply_port);
2207 if(m == EMPTY_QUEUE){
2208 // This really should not happen
2209 printk(KERN_ERR"dpti: Could not get reply frame\n");
2210 goto out;
2211 }
2212 }
2213 if (pHba->reply_pool_pa <= m &&
2214 m < pHba->reply_pool_pa +
2215 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2216 reply = (u8 *)pHba->reply_pool +
2217 (m - pHba->reply_pool_pa);
2218 } else {
2219 /* Ick, we should *never* be here */
2220 printk(KERN_ERR "dpti: reply frame not from pool\n");
2221 reply = (u8 *)bus_to_virt(m);
2222 }
2223
2224 if (readl(reply) & MSG_FAIL) {
2225 u32 old_m = readl(reply+28);
2226			void __iomem *msg;
2227 u32 old_context;
2228 PDEBUG("%s: Failed message\n",pHba->name);
2229 if(old_m >= 0x100000){
2230 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2231 writel(m,pHba->reply_port);
2232 continue;
2233 }
2234 // Transaction context is 0 in failed reply frame
2235			msg = pHba->msg_addr_virt + old_m;
2236 old_context = readl(msg+12);
2237 writel(old_context, reply+12);
2238 adpt_send_nop(pHba, old_m);
2239 }
2240 context = readl(reply+8);
2241 if(context & 0x40000000){ // IOCTL
2242			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2243 if( p != NULL) {
2244 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2245 }
2246 // All IOCTLs will also be post wait
2247 }
2248 if(context & 0x80000000){ // Post wait message
2249 status = readl(reply+16);
2250 if(status >> 24){
2251 status &= 0xffff; /* Get detail status */
2252 } else {
2253 status = I2O_POST_WAIT_OK;
2254 }
2255 if(!(context & 0x40000000)) {
2256 cmd = adpt_cmd_from_context(pHba,
2257 readl(reply+12));
2258 if(cmd != NULL) {
2259 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2260 }
2261 }
2262 adpt_i2o_post_wait_complete(context, status);
2263 } else { // SCSI message
2264			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2265			if(cmd != NULL){
2266				scsi_dma_unmap(cmd);
2267 if(cmd->serial_number != 0) { // If not timedout
2268 adpt_i2o_to_scsi(reply, cmd);
2269 }
2270 }
2271 }
2272 writel(m, pHba->reply_port);
2273 wmb();
2274 rmb();
2275 }
2276 handled = 1;
2277out: if(pHba->host)
2278 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2279 return IRQ_RETVAL(handled);
2280}
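/*
 * Reply routing in adpt_isr() is driven by the initiator context word
 * (msg[2], read back at reply+8): 0x40000000 marks an ioctl passthru
 * reply and 0x80000000 a post-wait caller, while the transaction
 * context (msg[3], reply+12) carries the reply-buffer or scsi_cmnd
 * cookie.
 */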
2281
2282static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2283{
2284 int i;
2285 u32 msg[MAX_MESSAGE_SIZE];
2286 u32* mptr;
2287	u32* lptr;
2288 u32 *lenptr;
2289 int direction;
2290 int scsidir;
2291	int nseg;
2292 u32 len;
2293 u32 reqlen;
2294 s32 rcode;
2295	dma_addr_t addr;
2296
2297 memset(msg, 0 , sizeof(msg));
2298	len = scsi_bufflen(cmd);
2299 direction = 0x00000000;
2300
2301 scsidir = 0x00000000; // DATA NO XFER
2302 if(len) {
2303 /*
2304 * Set SCBFlags to indicate if data is being transferred
2305 * in or out, or no data transfer
2306		 * Note: We do not have to verify that the index is negative since
2307 * cmd->cmnd[0] is an unsigned char
2308 */
2309 switch(cmd->sc_data_direction){
2310 case DMA_FROM_DEVICE:
2311 scsidir =0x40000000; // DATA IN (iop<--dev)
2312 break;
2313 case DMA_TO_DEVICE:
2314 direction=0x04000000; // SGL OUT
2315 scsidir =0x80000000; // DATA OUT (iop-->dev)
2316 break;
2317 case DMA_NONE:
2318 break;
2319 case DMA_BIDIRECTIONAL:
2320 scsidir =0x40000000; // DATA IN (iop<--dev)
2321 // Assume In - and continue;
2322 break;
2323 default:
2324 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2325 pHba->name, cmd->cmnd[0]);
2326 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2327 cmd->scsi_done(cmd);
2328 return 0;
2329 }
2330 }
2331 // msg[0] is set later
2332 // I2O_CMD_SCSI_EXEC
2333 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2334 msg[2] = 0;
2335	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2336 // Our cards use the transaction context as the tag for queueing
2337 // Adaptec/DPT Private stuff
2338 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2339 msg[5] = d->tid;
2340 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2341 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2342 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2343 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2344 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2345
2346 mptr=msg+7;
2347
2348 // Write SCSI command into the message - always 16 byte block
2349 memset(mptr, 0, 16);
2350 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2351 mptr+=4;
2352 lenptr=mptr++; /* Remember me - fill in when we know */
2353 if (dpt_dma64(pHba)) {
2354 reqlen = 16; // SINGLE SGE
2355 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2356 *mptr++ = 1 << PAGE_SHIFT;
2357 } else {
2358 reqlen = 14; // SINGLE SGE
2359 }
2360	/* Now fill in the SGList and command */
2361
2362 nseg = scsi_dma_map(cmd);
2363 BUG_ON(nseg < 0);
2364 if (nseg) {
2365 struct scatterlist *sg;
2366
2367 len = 0;
2368		scsi_for_each_sg(cmd, sg, nseg, i) {
2369			lptr = mptr;
2370 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2371 len+=sg_dma_len(sg);
2372 addr = sg_dma_address(sg);
2373 *mptr++ = dma_low(addr);
2374 if (dpt_dma64(pHba))
2375 *mptr++ = dma_high(addr);
2376 /* Make this an end of list */
2377 if (i == nseg - 1)
2378				*lptr = direction|0xD0000000|sg_dma_len(sg);
2379		}
2380 reqlen = mptr - msg;
2381 *lenptr = len;
2382
2383 if(cmd->underflow && len != cmd->underflow){
2384 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2385 len, cmd->underflow);
2386 }
2387 } else {
2388 *lenptr = len = 0;
2389 reqlen = 12;
2390 }
2391
2392 /* Stick the headers on */
2393 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2394
2395	// Send it on its way
2396 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2397 if (rcode == 0) {
2398 return 0;
2399 }
2400 return rcode;
2401}
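/*
 * SG element encoding used above (I2O simple-element format): bits 0-23
 * hold the byte count, 0x10000000 marks a simple element, and
 * 0x04000000 sets the outbound data direction; the final element is
 * rewritten with 0xD0000000, which adds the last-element and
 * end-of-buffer bits.
 */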
2402
2403
2404static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2405{
2406	struct Scsi_Host *host;
2407
2408	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2409	if (host == NULL) {
2410		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2411 return -1;
2412 }
2413 host->hostdata[0] = (unsigned long)pHba;
2414 pHba->host = host;
2415
2416 host->irq = pHba->pDev->irq;
2417 /* no IO ports, so don't have to set host->io_port and
2418 * host->n_io_port
2419 */
2420 host->io_port = 0;
2421 host->n_io_port = 0;
2422 /* see comments in scsi_host.h */
2423 host->max_id = 16;
2424 host->max_lun = 256;
2425 host->max_channel = pHba->top_scsi_channel + 1;
2426 host->cmd_per_lun = 1;
2427	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2428 host->sg_tablesize = pHba->sg_tablesize;
2429 host->can_queue = pHba->post_fifo_size;
2430
2431 return 0;
2432}
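/*
 * hostdata[0] is the only per-host state kept in the Scsi_Host: it
 * hands the adpt_hba pointer back to us in adpt_i2o_to_scsi() and the
 * other midlayer entry points.
 */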
2433
2434
2435static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2436{
2437 adpt_hba* pHba;
2438 u32 hba_status;
2439 u32 dev_status;
2440 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2441 // I know this would look cleaner if I just read bytes
2442 // but the model I have been using for all the rest of the
2443 // io is in 4 byte words - so I keep that model
2444 u16 detailed_status = readl(reply+16) &0xffff;
2445 dev_status = (detailed_status & 0xff);
2446 hba_status = detailed_status >> 8;
2447
2448 // calculate resid for sg
2449	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
2450
2451 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2452
2453 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2454
2455 if(!(reply_flags & MSG_FAIL)) {
2456 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2457 case I2O_SCSI_DSC_SUCCESS:
2458 cmd->result = (DID_OK << 16);
2459 // handle underflow
2460 if(readl(reply+5) < cmd->underflow ) {
2461 cmd->result = (DID_ERROR <<16);
2462 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2463 }
2464 break;
2465 case I2O_SCSI_DSC_REQUEST_ABORTED:
2466 cmd->result = (DID_ABORT << 16);
2467 break;
2468 case I2O_SCSI_DSC_PATH_INVALID:
2469 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2470 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2471 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2472 case I2O_SCSI_DSC_NO_ADAPTER:
2473 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2474 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2475 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2476 cmd->result = (DID_TIME_OUT << 16);
2477 break;
2478 case I2O_SCSI_DSC_ADAPTER_BUSY:
2479 case I2O_SCSI_DSC_BUS_BUSY:
2480 cmd->result = (DID_BUS_BUSY << 16);
2481 break;
2482 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2483 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2484 cmd->result = (DID_RESET << 16);
2485 break;
2486 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2487 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2488 cmd->result = (DID_PARITY << 16);
2489 break;
2490 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2491 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2492 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2493 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2494 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2495 case I2O_SCSI_DSC_DATA_OVERRUN:
2496 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2497 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2498 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2499 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2500 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2501 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2502 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2503 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2504 case I2O_SCSI_DSC_INVALID_CDB:
2505 case I2O_SCSI_DSC_LUN_INVALID:
2506 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2507 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2508 case I2O_SCSI_DSC_NO_NEXUS:
2509 case I2O_SCSI_DSC_CDB_RECEIVED:
2510 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2511 case I2O_SCSI_DSC_QUEUE_FROZEN:
2512 case I2O_SCSI_DSC_REQUEST_INVALID:
2513 default:
2514 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2515 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2516 hba_status, dev_status, cmd->cmnd[0]);
2517 cmd->result = (DID_ERROR << 16);
2518 break;
2519 }
2520
2521 // copy over the request sense data if it was a check
2522 // condition status
2523	if (dev_status == SAM_STAT_CHECK_CONDITION) {
2524		u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2525		// Copy over the sense data
2526		memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2527 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2528 cmd->sense_buffer[2] == DATA_PROTECT ){
2529			/* This is to handle a failed array */
2530 cmd->result = (DID_TIME_OUT << 16);
2531 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2532 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2533 hba_status, dev_status, cmd->cmnd[0]);
2534
2535 }
2536 }
2537 } else {
2538		/* In this condition we could not talk to the tid;
2539		 * the card rejected it. We should signal a retry
2540		 * for a limited number of retries.
2541 */
2542 cmd->result = (DID_TIME_OUT << 16);
2543 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2544 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2545 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2546 }
2547
2548 cmd->result |= (dev_status);
2549
2550 if(cmd->scsi_done != NULL){
2551 cmd->scsi_done(cmd);
2552 }
2553 return cmd->result;
2554}
2555
2556
2557static s32 adpt_rescan(adpt_hba* pHba)
2558{
2559 s32 rcode;
2560 ulong flags = 0;
2561
2562 if(pHba->host)
2563 spin_lock_irqsave(pHba->host->host_lock, flags);
2564 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2565 goto out;
2566 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2567 goto out;
2568 rcode = 0;
2569out: if(pHba->host)
2570 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2571 return rcode;
2572}
2573
2574
2575static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2576{
2577 int i;
2578 int max;
2579 int tid;
2580 struct i2o_device *d;
2581 i2o_lct *lct = pHba->lct;
2582 u8 bus_no = 0;
2583 s16 scsi_id;
2584 s16 scsi_lun;
2585 u32 buf[10]; // at least 8 u32's
2586 struct adpt_device* pDev = NULL;
2587 struct i2o_device* pI2o_dev = NULL;
2588
2589 if (lct == NULL) {
2590 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2591 return -1;
2592 }
2593
2594 max = lct->table_size;
2595 max -= 3;
2596 max /= 9;
2597
2598 // Mark each drive as unscanned
2599 for (d = pHba->devices; d; d = d->next) {
2600 pDev =(struct adpt_device*) d->owner;
2601 if(!pDev){
2602 continue;
2603 }
2604 pDev->state |= DPTI_DEV_UNSCANNED;
2605 }
2606
2607 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2608
2609 for(i=0;i<max;i++) {
2610 if( lct->lct_entry[i].user_tid != 0xfff){
2611 continue;
2612 }
2613
2614 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2615 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2616 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2617 tid = lct->lct_entry[i].tid;
2618 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2619 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2620 continue;
2621 }
2622 bus_no = buf[0]>>16;
2623 scsi_id = buf[1];
2624 scsi_lun = (buf[2]>>8 )&0xff;
2625 pDev = pHba->channel[bus_no].device[scsi_id];
2626 /* da lun */
2627 while(pDev) {
2628 if(pDev->scsi_lun == scsi_lun) {
2629 break;
2630 }
2631 pDev = pDev->next_lun;
2632 }
2633 if(!pDev ) { // Something new add it
2634				d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
2635 if(d==NULL)
2636 {
2637 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2638 return -ENOMEM;
2639 }
2640
2641				d->controller = pHba;
2642 d->next = NULL;
2643
2644 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2645
2646 d->flags = 0;
2647 adpt_i2o_report_hba_unit(pHba, d);
2648 adpt_i2o_install_device(pHba, d);
2649
2650 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
2651 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
2652 continue;
2653 }
2654 pDev = pHba->channel[bus_no].device[scsi_id];
2655 if( pDev == NULL){
2656					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2657 if(pDev == NULL) {
2658 return -ENOMEM;
2659 }
2660 pHba->channel[bus_no].device[scsi_id] = pDev;
2661 } else {
2662 while (pDev->next_lun) {
2663 pDev = pDev->next_lun;
2664 }
2665					pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
2666 if(pDev == NULL) {
2667 return -ENOMEM;
2668 }
2669 }
2670 pDev->tid = d->lct_data.tid;
2671 pDev->scsi_channel = bus_no;
2672 pDev->scsi_id = scsi_id;
2673 pDev->scsi_lun = scsi_lun;
2674 pDev->pI2o_dev = d;
2675 d->owner = pDev;
2676 pDev->type = (buf[0])&0xff;
2677 pDev->flags = (buf[0]>>8)&0xff;
2678				// Too late, SCSI system has made up its mind, but what the hey ...
2679 if(scsi_id > pHba->top_scsi_id){
2680 pHba->top_scsi_id = scsi_id;
2681 }
2682 if(scsi_lun > pHba->top_scsi_lun){
2683 pHba->top_scsi_lun = scsi_lun;
2684 }
2685 continue;
2686 } // end of new i2o device
2687
2688 // We found an old device - check it
2689 while(pDev) {
2690 if(pDev->scsi_lun == scsi_lun) {
2691 if(!scsi_device_online(pDev->pScsi_dev)) {
2692 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2693 pHba->name,bus_no,scsi_id,scsi_lun);
2694 if (pDev->pScsi_dev) {
2695 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2696 }
2697 }
2698 d = pDev->pI2o_dev;
2699 if(d->lct_data.tid != tid) { // something changed
2700 pDev->tid = tid;
2701 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2702 if (pDev->pScsi_dev) {
2703 pDev->pScsi_dev->changed = TRUE;
2704 pDev->pScsi_dev->removable = TRUE;
2705 }
2706 }
2707 // Found it - mark it scanned
2708 pDev->state = DPTI_DEV_ONLINE;
2709 break;
2710 }
2711 pDev = pDev->next_lun;
2712 }
2713 }
2714 }
2715 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2716 pDev =(struct adpt_device*) pI2o_dev->owner;
2717 if(!pDev){
2718 continue;
2719 }
2720 // Drive offline drives that previously existed but could not be found
2721 // in the LCT table
2722 if (pDev->state & DPTI_DEV_UNSCANNED){
2723 pDev->state = DPTI_DEV_OFFLINE;
2724 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2725 if (pDev->pScsi_dev) {
2726 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2727 }
2728 }
2729 }
2730 return 0;
2731}
2732
2733static void adpt_fail_posted_scbs(adpt_hba* pHba)
2734{
2735 struct scsi_cmnd* cmd = NULL;
2736 struct scsi_device* d = NULL;
2737
2738 shost_for_each_device(d, pHba->host) {
2739 unsigned long flags;
2740 spin_lock_irqsave(&d->list_lock, flags);
2741 list_for_each_entry(cmd, &d->cmd_list, list) {
2742 if(cmd->serial_number == 0){
2743 continue;
2744 }
2745 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2746 cmd->scsi_done(cmd);
2747 }
2748 spin_unlock_irqrestore(&d->list_lock, flags);
2749 }
2750}
2751
2752
2753/*============================================================================
2754 * Routines from i2o subsystem
2755 *============================================================================
2756 */
2757
2758
2759
2760/*
2761 * Bring an I2O controller into HOLD state. See the spec.
2762 */
2763static int adpt_i2o_activate_hba(adpt_hba* pHba)
2764{
2765 int rcode;
2766
2767 if(pHba->initialized ) {
2768 if (adpt_i2o_status_get(pHba) < 0) {
2769 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2770 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2771 return rcode;
2772 }
2773 if (adpt_i2o_status_get(pHba) < 0) {
2774 printk(KERN_INFO "HBA not responding.\n");
2775 return -1;
2776 }
2777 }
2778
2779 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2780 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2781 return -1;
2782 }
2783
2784 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2785 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2786 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2787 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2788 adpt_i2o_reset_hba(pHba);
2789 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2790 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2791 return -1;
2792 }
2793 }
2794 } else {
2795 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2796 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2797 return rcode;
2798 }
2799
2800 }
2801
2802 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2803 return -1;
2804 }
2805
2806 /* In HOLD state */
2807
2808 if (adpt_i2o_hrt_get(pHba) < 0) {
2809 return -1;
2810 }
2811
2812 return 0;
2813}
2814
2815/*
2816 * Bring a controller online into OPERATIONAL state.
2817 */
2818
2819static int adpt_i2o_online_hba(adpt_hba* pHba)
2820{
2821 if (adpt_i2o_systab_send(pHba) < 0) {
2822 adpt_i2o_delete_hba(pHba);
2823 return -1;
2824 }
2825 /* In READY state */
2826
2827 if (adpt_i2o_enable_hba(pHba) < 0) {
2828 adpt_i2o_delete_hba(pHba);
2829 return -1;
2830 }
2831
2832 /* In OPERATIONAL state */
2833 return 0;
2834}
2835
2836static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2837{
2838 u32 __iomem *msg;
2839 ulong timeout = jiffies + 5*HZ;
2840
2841 while(m == EMPTY_QUEUE){
2842 rmb();
2843 m = readl(pHba->post_port);
2844 if(m != EMPTY_QUEUE){
2845 break;
2846 }
2847 if(time_after(jiffies,timeout)){
2848 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2849 return 2;
2850 }
2851		schedule_timeout_uninterruptible(1);
2852 }
2853 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2854 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2855 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2856 writel( 0,&msg[2]);
2857 wmb();
2858
2859 writel(m, pHba->post_port);
2860 wmb();
2861 return 0;
2862}
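/*
 * adpt_send_nop() returns a claimed message frame to the IOP when we
 * cannot use it (allocation failure during queue init, or the failed
 * frame path in adpt_isr()); posting a UTIL_NOP is how an inbound frame
 * is handed back without executing anything.
 */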
2863
2864static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2865{
2866 u8 *status;
2867	dma_addr_t addr;
2868 u32 __iomem *msg = NULL;
2869 int i;
2870 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2871 u32 m;
2872
2873 do {
2874 rmb();
2875 m = readl(pHba->post_port);
2876 if (m != EMPTY_QUEUE) {
2877 break;
2878 }
2879
2880 if(time_after(jiffies,timeout)){
2881 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2882 return -ETIMEDOUT;
2883 }
2884		schedule_timeout_uninterruptible(1);
2885 } while(m == EMPTY_QUEUE);
2886
2887 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2888
2889	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2890	if (!status) {
2891 adpt_send_nop(pHba, m);
2892 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2893 pHba->name);
2894 return -ENOMEM;
2895 }
2896	memset(status, 0, 4);
2897
2898 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2899 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2900 writel(0, &msg[2]);
2901 writel(0x0106, &msg[3]); /* Transaction context */
2902 writel(4096, &msg[4]); /* Host page frame size */
2903 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2904 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2905	writel((u32)addr, &msg[7]);
2906
2907 writel(m, pHba->post_port);
2908 wmb();
2909
2910 // Wait for the reply status to come back
2911 do {
2912 if (*status) {
2913 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2914 break;
2915 }
2916 }
2917 rmb();
2918 if(time_after(jiffies,timeout)){
2919 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2920 /* We lose 4 bytes of "status" here, but we
2921 cannot free these because controller may
2922 awake and corrupt those bytes at any time */
2923 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2924 return -ETIMEDOUT;
2925 }
2926		schedule_timeout_uninterruptible(1);
2927 } while (1);
2928
2929 // If the command was successful, fill the fifo with our reply
2930 // message packets
2931 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2932		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2933 return -2;
2934 }
2935	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2936
2937 if(pHba->reply_pool != NULL) {
2938 dma_free_coherent(&pHba->pDev->dev,
2939 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2940 pHba->reply_pool, pHba->reply_pool_pa);
2941 }
2942
2943 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2944 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2945 &pHba->reply_pool_pa, GFP_KERNEL);
2946 if (!pHba->reply_pool) {
2947 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2948 return -ENOMEM;
2949	}
2950	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2951
2952	for(i = 0; i < pHba->reply_fifo_size; i++) {
2953 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2954 pHba->reply_port);
2955		wmb();
2956 }
2957 adpt_i2o_status_get(pHba);
2958 return 0;
2959}
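/*
 * Outbound queue bring-up, as implemented above: post
 * EXEC_OUTBOUND_INIT with a 4-byte status buffer, poll until the IOP
 * writes I2O_EXEC_OUTBOUND_INIT_COMPLETE (0x04), then prime the reply
 * FIFO by writing the bus address of every frame in reply_pool to the
 * reply port.
 */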
2960
2961
2962/*
2963 * I2O System Table. Contains information about
2964 * all the IOPs in the system. Used to inform IOPs
2965 * about each other's existence.
2966 *
2967 * sys_tbl_ver is the CurrentChangeIndicator that is
2968 * used by IOPs to track changes.
2969 */
2970
2971
2972
2973static s32 adpt_i2o_status_get(adpt_hba* pHba)
2974{
2975 ulong timeout;
2976 u32 m;
2977 u32 __iomem *msg;
2978 u8 *status_block=NULL;
2979
2980 if(pHba->status_block == NULL) {
2981 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2982 sizeof(i2o_status_block),
2983 &pHba->status_block_pa, GFP_KERNEL);
2984 if(pHba->status_block == NULL) {
2985 printk(KERN_ERR
2986 "dpti%d: Get Status Block failed; Out of memory. \n",
2987 pHba->unit);
2988 return -ENOMEM;
2989 }
2990 }
2991 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2992 status_block = (u8*)(pHba->status_block);
2993 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2994 do {
2995 rmb();
2996 m = readl(pHba->post_port);
2997 if (m != EMPTY_QUEUE) {
2998 break;
2999 }
3000 if(time_after(jiffies,timeout)){
3001 printk(KERN_ERR "%s: Timeout waiting for message !\n",
3002 pHba->name);
3003 return -ETIMEDOUT;
3004 }
3005		schedule_timeout_uninterruptible(1);
3006 } while(m==EMPTY_QUEUE);
3007
3008
3009 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3010
3011 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3012 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3013 writel(1, &msg[2]);
3014 writel(0, &msg[3]);
3015 writel(0, &msg[4]);
3016 writel(0, &msg[5]);
3017 writel( dma_low(pHba->status_block_pa), &msg[6]);
3018 writel( dma_high(pHba->status_block_pa), &msg[7]);
3019 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3020
3021 //post message
3022 writel(m, pHba->post_port);
3023 wmb();
3024
3025 while(status_block[87]!=0xff){
3026 if(time_after(jiffies,timeout)){
3027 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3028 pHba->unit);
3029 return -ETIMEDOUT;
3030 }
3031 rmb();
3032		schedule_timeout_uninterruptible(1);
3033 }
3034
3035 // Set up our number of outbound and inbound messages
3036 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3037 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3038 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3039 }
3040
3041 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3042 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3043 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3044 }
3045
3046 // Calculate the Scatter Gather list size
3047 if (dpt_dma64(pHba)) {
3048 pHba->sg_tablesize
3049 = ((pHba->status_block->inbound_frame_size * 4
3050 - 14 * sizeof(u32))
3051 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3052 } else {
3053 pHba->sg_tablesize
3054 = ((pHba->status_block->inbound_frame_size * 4
3055 - 12 * sizeof(u32))
3056 / sizeof(struct sg_simple_element));
3057 }
3058 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3059 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3060 }
3061
3062
3063#ifdef DEBUG
3064 printk("dpti%d: State = ",pHba->unit);
3065 switch(pHba->status_block->iop_state) {
3066 case 0x01:
3067 printk("INIT\n");
3068 break;
3069 case 0x02:
3070 printk("RESET\n");
3071 break;
3072 case 0x04:
3073 printk("HOLD\n");
3074 break;
3075 case 0x05:
3076 printk("READY\n");
3077 break;
3078 case 0x08:
3079 printk("OPERATIONAL\n");
3080 break;
3081 case 0x10:
3082 printk("FAILED\n");
3083 break;
3084 case 0x11:
3085 printk("FAULTED\n");
3086 break;
3087 default:
3088 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3089 }
3090#endif
3091 return 0;
3092}
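/*
 * STATUS_GET completion is detected by polling status_block[87], the
 * last byte of the 88-byte block, which the IOP presumably DMAs last;
 * there is no reply message to wait on for this command.
 */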
3093
3094/*
3095 * Get the IOP's Logical Configuration Table
3096 */
3097static int adpt_i2o_lct_get(adpt_hba* pHba)
3098{
3099 u32 msg[8];
3100 int ret;
3101 u32 buf[16];
3102
3103 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3104 pHba->lct_size = pHba->status_block->expected_lct_size;
3105 }
3106 do {
3107 if (pHba->lct == NULL) {
3108 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3109 pHba->lct_size, &pHba->lct_pa,
3110 GFP_KERNEL);
3111 if(pHba->lct == NULL) {
3112 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3113 pHba->name);
3114 return -ENOMEM;
3115 }
3116 }
3117 memset(pHba->lct, 0, pHba->lct_size);
3118
3119 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3120 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3121 msg[2] = 0;
3122 msg[3] = 0;
3123 msg[4] = 0xFFFFFFFF; /* All devices */
3124 msg[5] = 0x00000000; /* Report now */
3125 msg[6] = 0xD0000000|pHba->lct_size;
3126		msg[7] = (u32)pHba->lct_pa;
3127
3128 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3129			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3130 pHba->name, ret);
3131 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3132 return ret;
3133 }
3134
3135 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3136 pHba->lct_size = pHba->lct->table_size << 2;
3137 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3138 pHba->lct, pHba->lct_pa);
3139 pHba->lct = NULL;
3140 }
3141 } while (pHba->lct == NULL);
3142
3143 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3144
3145
3146 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3147 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3148 pHba->FwDebugBufferSize = buf[1];
3149 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3150 pHba->FwDebugBufferSize);
3151 if (pHba->FwDebugBuffer_P) {
3152 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3153 FW_DEBUG_FLAGS_OFFSET;
3154 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3155 FW_DEBUG_BLED_OFFSET;
3156 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3157 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3158 FW_DEBUG_STR_LENGTH_OFFSET;
3159 pHba->FwDebugBuffer_P += buf[2];
3160 pHba->FwDebugFlags = 0;
3161 }
3162 }
3163
3164 return 0;
3165}
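/*
 * LCT sizing is a negotiation: if the returned table reports a larger
 * table_size than the buffer we offered, the loop above frees the
 * buffer, grows lct_size, and re-issues LCT_NOTIFY until the whole
 * table fits.
 */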
3166
3167static int adpt_i2o_build_sys_table(void)
3168{
3169	adpt_hba* pHba = hba_chain;
3170 int count = 0;
3171
3172 if (sys_tbl)
3173 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3174 sys_tbl, sys_tbl_pa);
3175
3176 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3177 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3178
3179 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3180 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3181	if (!sys_tbl) {
3182 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3183 return -ENOMEM;
3184 }
3185	memset(sys_tbl, 0, sys_tbl_len);
3186
3187 sys_tbl->num_entries = hba_count;
3188 sys_tbl->version = I2OVERSION;
3189 sys_tbl->change_ind = sys_tbl_ind++;
3190
3191 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3192		u64 addr;
3193 // Get updated Status Block so we have the latest information
3194 if (adpt_i2o_status_get(pHba)) {
3195 sys_tbl->num_entries--;
3196 continue; // try next one
3197 }
3198
3199 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3200 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3201 sys_tbl->iops[count].seg_num = 0;
3202 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3203 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3204 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3205 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3206 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3207 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3208 addr = pHba->base_addr_phys + 0x40;
3209 sys_tbl->iops[count].inbound_low = dma_low(addr);
3210 sys_tbl->iops[count].inbound_high = dma_high(addr);
3211
3212 count++;
3213 }
3214
3215#ifdef DEBUG
3216{
3217 u32 *table = (u32*)sys_tbl;
3218 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3219 for(count = 0; count < (sys_tbl_len >>2); count++) {
3220 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3221 count, table[count]);
3222 }
3223}
3224#endif
3225
3226 return 0;
3227}
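/*
 * Each system-table entry advertises its IOP's inbound FIFO at
 * base_addr_phys + 0x40, split into low/high halves so it remains
 * addressable on 64-bit systems.
 */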
3228
3229
3230/*
3231 * Dump the information block associated with a given unit (TID)
3232 */
3233
3234static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3235{
3236 char buf[64];
3237 int unit = d->lct_data.tid;
3238
3239 printk(KERN_INFO "TID %3.3d ", unit);
3240
3241 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3242 {
3243 buf[16]=0;
3244 printk(" Vendor: %-12.12s", buf);
3245 }
3246 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3247 {
3248 buf[16]=0;
3249 printk(" Device: %-12.12s", buf);
3250 }
3251 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3252 {
3253 buf[8]=0;
3254 printk(" Rev: %-12.12s\n", buf);
3255 }
3256#ifdef DEBUG
3257 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3258 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3259 printk(KERN_INFO "\tFlags: ");
3260
3261 if(d->lct_data.device_flags&(1<<0))
3262 printk("C"); // ConfigDialog requested
3263 if(d->lct_data.device_flags&(1<<1))
3264 printk("U"); // Multi-user capable
3265 if(!(d->lct_data.device_flags&(1<<4)))
3266 printk("P"); // Peer service enabled!
3267 if(!(d->lct_data.device_flags&(1<<5)))
3268 printk("M"); // Mgmt service enabled!
3269 printk("\n");
3270#endif
3271}
3272
3273#ifdef DEBUG
3274/*
3275 * Do i2o class name lookup
3276 */
3277static const char *adpt_i2o_get_class_name(int class)
3278{
3279 int idx = 16;
3280 static char *i2o_class_name[] = {
3281 "Executive",
3282 "Device Driver Module",
3283 "Block Device",
3284 "Tape Device",
3285 "LAN Interface",
3286 "WAN Interface",
3287 "Fibre Channel Port",
3288 "Fibre Channel Device",
3289 "SCSI Device",
3290 "ATE Port",
3291 "ATE Device",
3292 "Floppy Controller",
3293 "Floppy Device",
3294 "Secondary Bus Port",
3295 "Peer Transport Agent",
3296 "Peer Transport",
3297 "Unknown"
3298 };
3299
3300 switch(class&0xFFF) {
3301 case I2O_CLASS_EXECUTIVE:
3302 idx = 0; break;
3303 case I2O_CLASS_DDM:
3304 idx = 1; break;
3305 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3306 idx = 2; break;
3307 case I2O_CLASS_SEQUENTIAL_STORAGE:
3308 idx = 3; break;
3309 case I2O_CLASS_LAN:
3310 idx = 4; break;
3311 case I2O_CLASS_WAN:
3312 idx = 5; break;
3313 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3314 idx = 6; break;
3315 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3316 idx = 7; break;
3317 case I2O_CLASS_SCSI_PERIPHERAL:
3318 idx = 8; break;
3319 case I2O_CLASS_ATE_PORT:
3320 idx = 9; break;
3321 case I2O_CLASS_ATE_PERIPHERAL:
3322 idx = 10; break;
3323 case I2O_CLASS_FLOPPY_CONTROLLER:
3324 idx = 11; break;
3325 case I2O_CLASS_FLOPPY_DEVICE:
3326 idx = 12; break;
3327 case I2O_CLASS_BUS_ADAPTER_PORT:
3328 idx = 13; break;
3329 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3330 idx = 14; break;
3331 case I2O_CLASS_PEER_TRANSPORT:
3332 idx = 15; break;
3333 }
3334 return i2o_class_name[idx];
3335}
3336#endif
3337
3338
3339static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3340{
3341 u32 msg[6];
3342 int ret, size = sizeof(i2o_hrt);
3343
3344 do {
3345 if (pHba->hrt == NULL) {
3346 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3347 size, &pHba->hrt_pa, GFP_KERNEL);
3348 if (pHba->hrt == NULL) {
3349 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3350 return -ENOMEM;
3351 }
3352 }
3353
3354 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3355 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3356 msg[2]= 0;
3357 msg[3]= 0;
3358 msg[4]= (0xD0000000 | size); /* Simple transaction */
3359	msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3360
3361 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3362 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3363 return ret;
3364 }
3365
3366 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3367 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3368 dma_free_coherent(&pHba->pDev->dev, size,
3369 pHba->hrt, pHba->hrt_pa);
3370 size = newsize;
3371 pHba->hrt = NULL;
3372 }
3373 } while(pHba->hrt == NULL);
3374 return 0;
3375}
3376
3377/*
3378 * Query one scalar group value or a whole scalar group.
3379 */
3380static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3381 int group, int field, void *buf, int buflen)
3382{
3383 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3384 u8 *opblk_va;
3385 dma_addr_t opblk_pa;
3386 u8 *resblk_va;
3387 dma_addr_t resblk_pa;
3388
3389 int size;
3390
3391 /* 8 bytes for header */
3392 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3393 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3394 if (resblk_va == NULL) {
3395 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3396 return -ENOMEM;
3397 }
3398
3399 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3400 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3401 if (opblk_va == NULL) {
3402 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3403 resblk_va, resblk_pa);
3404		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3405 pHba->name);
3406 return -ENOMEM;
3407 }
3408 if (field == -1) /* whole group */
3409 opblk[4] = -1;
3410
3411	memcpy(opblk_va, opblk, sizeof(opblk));
3412	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3413 opblk_va, opblk_pa, sizeof(opblk),
3414 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3415 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3416	if (size == -ETIME) {
3417 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3418 resblk_va, resblk_pa);
3419 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3420 return -ETIME;
3421 } else if (size == -EINTR) {
3422 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3423 resblk_va, resblk_pa);
3424 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3425 return -EINTR;
3426 }
3427
3428	memcpy(buf, resblk_va+8, buflen);	/* cut off header */
3429
3430 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3431 resblk_va, resblk_pa);
3432 if (size < 0)
3433 return size;
3434
3435 return buflen;
3436}
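/*
 * Typical call pattern, as used by adpt_i2o_report_hba_unit() later in
 * this file: query scalar group 0xF100 (device identity), field 3
 * (vendor) for a given TID:
 *
 *	char buf[64];
 *	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16) >= 0)
 *		buf[16] = 0;	// returned strings are not NUL-terminated
 *
 * Passing field == -1 fetches the whole group (opblk[4] is set to -1).
 */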
3437
3438
3439/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3440 *
3441 * This function can be used for all UtilParamsGet/Set operations.
3442 * The OperationBlock is given in opblk-buffer,
3443 * and results are returned in resblk-buffer.
3444 * Note that the minimum sized resblk is 8 bytes and contains
3445 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3446 */
3447static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3448 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3449 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3450{
3451 u32 msg[9];
3452	u32 *res = (u32 *)resblk_va;
3453 int wait_status;
3454
3455 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3456 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3457 msg[2] = 0;
3458 msg[3] = 0;
3459 msg[4] = 0;
3460 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3461	msg[6] = (u32)opblk_pa;
3462	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3463	msg[8] = (u32)resblk_pa;
3464
3465 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3466		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3467 return wait_status; /* -DetailedStatus */
3468 }
3469
3470 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3471 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3472 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3473 pHba->name,
3474 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3475 : "PARAMS_GET",
3476 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3477 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3478 }
3479
3480 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3481}
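/*
 * The first 8 bytes of every result block are the UtilParams header
 * (ResultCount, ErrorInfoSize, BlockStatus, BlockSize); callers such as
 * adpt_i2o_query_scalar() skip them with resblk_va + 8.
 */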
3482
3483
3484static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3485{
3486 u32 msg[4];
3487 int ret;
3488
3489 adpt_i2o_status_get(pHba);
3490
3491 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3492
3493 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3494 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3495 return 0;
3496 }
3497
3498 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3499 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3500 msg[2] = 0;
3501 msg[3] = 0;
3502
3503 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3504 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3505 pHba->unit, -ret);
3506 } else {
3507 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3508 }
3509
3510 adpt_i2o_status_get(pHba);
3511 return ret;
3512}
3513
3514
3515/*
3516 * Enable IOP. Allows the IOP to resume external operations.
3517 */
3518static int adpt_i2o_enable_hba(adpt_hba* pHba)
3519{
3520 u32 msg[4];
3521 int ret;
3522
3523 adpt_i2o_status_get(pHba);
3524 if(!pHba->status_block){
3525 return -ENOMEM;
3526 }
3527 /* Enable only allowed on READY state */
3528 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3529 return 0;
3530
3531 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3532 return -EINVAL;
3533
3534 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3535 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3536 msg[2]= 0;
3537 msg[3]= 0;
3538
3539 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3540 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3541 pHba->name, ret);
3542 } else {
3543 PDEBUG("%s: Enabled.\n", pHba->name);
3544 }
3545
3546 adpt_i2o_status_get(pHba);
3547 return ret;
3548}
3549
3550
3551static int adpt_i2o_systab_send(adpt_hba* pHba)
3552{
3553 u32 msg[12];
3554 int ret;
3555
3556 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3557 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3558 msg[2] = 0;
3559 msg[3] = 0;
3560 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3561 msg[5] = 0; /* Segment 0 */
3562
3563 /*
3564 * Provide three SGL-elements:
3565 * System table (SysTab), Private memory space declaration and
3566 * Private i/o space declaration
3567 */
3568 msg[6] = 0x54000000 | sys_tbl_len;
3569	msg[7] = (u32)sys_tbl_pa;
3570 msg[8] = 0x54000000 | 0;
3571 msg[9] = 0;
3572 msg[10] = 0xD4000000 | 0;
3573 msg[11] = 0;
3574
3575 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3576 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3577 pHba->name, ret);
3578 }
3579#ifdef DEBUG
3580 else {
3581 PINFO("%s: SysTab set.\n", pHba->name);
3582 }
3583#endif
3584
3585 return ret;
3586 }
3587
3588
3589/*============================================================================
3590 *
3591 *============================================================================
3592 */
3593
3594
3595#ifdef UARTDELAY
3596
3597static void adpt_delay(int millisec)
3598{
3599 int i;
3600 for (i = 0; i < millisec; i++) {
3601 udelay(1000); /* delay for one millisecond */
3602 }
3603}
3604
3605#endif
3606
3607static struct scsi_host_template driver_template = {
3608	.module			= THIS_MODULE,
3609 .name = "dpt_i2o",
3610 .proc_name = "dpt_i2o",
3611 .proc_info = adpt_proc_info,
3612 .info = adpt_info,
3613 .queuecommand = adpt_queue,
3614 .eh_abort_handler = adpt_abort,
3615 .eh_device_reset_handler = adpt_device_reset,
3616 .eh_bus_reset_handler = adpt_bus_reset,
3617 .eh_host_reset_handler = adpt_reset,
3618 .bios_param = adpt_bios_param,
3619 .slave_configure = adpt_slave_configure,
3620 .can_queue = MAX_TO_IOP_MESSAGES,
3621 .this_id = 7,
3622 .cmd_per_lun = 1,
3623 .use_clustering = ENABLE_CLUSTERING,
3624};
3625
3626static int __init adpt_init(void)
3627{
3628 int error;
3629 adpt_hba *pHba, *next;
3630
3631 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3632
3633 error = adpt_detect(&driver_template);
3634 if (error < 0)
3635 return error;
3636 if (hba_chain == NULL)
3637 return -ENODEV;
3638
3639 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3640 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3641 if (error)
3642 goto fail;
3643 scsi_scan_host(pHba->host);
3644 }
3645 return 0;
3646fail:
3647 for (pHba = hba_chain; pHba; pHba = next) {
3648 next = pHba->next;
3649 scsi_remove_host(pHba->host);
3650 }
3651 return error;
3652}
3653
3654static void __exit adpt_exit(void)
3655{
3656 adpt_hba *pHba, *next;
3657
3658 for (pHba = hba_chain; pHba; pHba = pHba->next)
3659 scsi_remove_host(pHba->host);
3660 for (pHba = hba_chain; pHba; pHba = next) {
3661 next = pHba->next;
3662 adpt_release(pHba->host);
3663 }
3664}
3665
3666module_init(adpt_init);
3667module_exit(adpt_exit);
3668
3669MODULE_LICENSE("GPL");