[SCSI] dpt_i2o: Remove DPTI_STATE_IOCTL
[deliverable/linux.git] / drivers / scsi / dpt_i2o.c
CommitLineData
1da177e4
LT
1/***************************************************************************
2 dpti.c - description
3 -------------------
4 begin : Thu Sep 7 2000
5 copyright : (C) 2000 by Adaptec
6
7 July 30, 2001 First version being submitted
8 for inclusion in the kernel. V2.4
9
10 See Documentation/scsi/dpti.txt for history, notes, license info
11 and credits
12 ***************************************************************************/
13
14/***************************************************************************
15 * *
16 * This program is free software; you can redistribute it and/or modify *
17 * it under the terms of the GNU General Public License as published by *
18 * the Free Software Foundation; either version 2 of the License, or *
19 * (at your option) any later version. *
20 * *
21 ***************************************************************************/
22/***************************************************************************
23 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24 - Support 2.6 kernel and DMA-mapping
25 - ioctl fix for raid tools
26 - use schedule_timeout in long long loop
27 **************************************************************************/
28
29/*#define DEBUG 1 */
30/*#define UARTDELAY 1 */
31
1da177e4
LT
32#include <linux/module.h>
33
34MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36
37////////////////////////////////////////////////////////////////
38
39#include <linux/ioctl.h> /* For SCSI-Passthrough */
40#include <asm/uaccess.h>
41
42#include <linux/stat.h>
43#include <linux/slab.h> /* for kmalloc() */
1da177e4
LT
44#include <linux/pci.h> /* for PCI support */
45#include <linux/proc_fs.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h> /* for udelay */
48#include <linux/interrupt.h>
49#include <linux/kernel.h> /* for printk */
50#include <linux/sched.h>
51#include <linux/reboot.h>
52#include <linux/spinlock.h>
910638ae 53#include <linux/dma-mapping.h>
1da177e4
LT
54
55#include <linux/timer.h>
56#include <linux/string.h>
57#include <linux/ioport.h>
0b950672 58#include <linux/mutex.h>
1da177e4
LT
59
60#include <asm/processor.h> /* for boot_cpu_data */
61#include <asm/pgtable.h>
62#include <asm/io.h> /* for virt_to_bus, etc. */
63
64#include <scsi/scsi.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68#include <scsi/scsi_tcq.h>
69
70#include "dpt/dptsig.h"
71#include "dpti.h"
72
73/*============================================================================
74 * Create a binary signature - this is read by dptsig
75 * Needed for our management apps
76 *============================================================================
77 */
c45d15d2 78static DEFINE_MUTEX(adpt_mutex);
1da177e4
LT
/*
 * Driver signature blob read by DPT management tools (dptsig).
 * Encodes the host CPU family plus driver type/version constants
 * from dptsig.h so management apps can identify this driver.
 */
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),	/* unknown architecture */
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
96
97
98
99
100/*============================================================================
101 * Globals
102 *============================================================================
103 */
104
0b950672 105static DEFINE_MUTEX(adpt_configuration_lock);
1da177e4 106
67af2b06
MS
107static struct i2o_sys_tbl *sys_tbl;
108static dma_addr_t sys_tbl_pa;
109static int sys_tbl_ind;
110static int sys_tbl_len;
1da177e4 111
1da177e4
LT
112static adpt_hba* hba_chain = NULL;
113static int hba_count = 0;
114
1ed43910
MS
115static struct class *adpt_sysfs_class;
116
f4927c45 117static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
62ac5aed
MS
118#ifdef CONFIG_COMPAT
119static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
120#endif
121
/*
 * File operations for the dpti management character device
 * (registered under DPTI_I2O_MAJOR in adpt_detect).
 */
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,	/* device is not seekable */
};
1da177e4
LT
131
/*
 * Per-request bookkeeping node for synchronous message posting,
 * linked off adpt_post_wait_queue and matched by 'id' when the
 * reply arrives.  See adpt_i2o_post_wait() for description.
 */
struct adpt_i2o_post_wait_data
{
	int status;				/* completion status of the request */
	u32 id;					/* unique id pairing request and reply */
	adpt_wait_queue_head_t *wq;		/* waiter to wake on completion */
	struct adpt_i2o_post_wait_data *next;	/* singly-linked queue */
};
142
143static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
144static u32 adpt_post_wait_id = 0;
145static DEFINE_SPINLOCK(adpt_post_wait_lock);
146
147
148/*============================================================================
149 * Functions
150 *============================================================================
151 */
152
62ac5aed
MS
153static inline int dpt_dma64(adpt_hba *pHba)
154{
155 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
156}
157
67af2b06
MS
/* Upper 32 bits of a DMA address, for building 64-bit SG elements. */
static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}
162
/* Lower 32 bits of a DMA address. */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
167
1da177e4
LT
168static u8 adpt_read_blink_led(adpt_hba* host)
169{
172c122d 170 if (host->FwDebugBLEDflag_P) {
1da177e4
LT
171 if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 return readb(host->FwDebugBLEDvalue_P);
173 }
174 }
175 return 0;
176}
177
178/*============================================================================
179 * Scsi host template interface functions
180 *============================================================================
181 */
182
/* PCI IDs this driver claims: standard DPT boards and Raptor boards. */
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
189
/*
 * Probe all Adaptec/DPT I2O RAID controllers on the PCI bus and walk
 * each one through the I2O bring-up sequence: INIT -> (activate) ->
 * HOLD -> (build sys table, online) -> OPERATIONAL, then read the LCT,
 * allocate SCSI hosts, and register the management char device.
 * Returns the number of HBAs successfully brought up.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* hold a reference for each installed board */
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		/* delete_hba unlinks pHba, so remember the successor first */
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If IOP don't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	/* sysfs class is optional; failure only disables the dpti%d nodes */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
300
301
24601bbc
AM
/*
 * Tear down one HBA when its SCSI host goes away.
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
313
314
/*
 * Issue a SCSI INQUIRY to the adapter itself (via an I2O private
 * SCSI_EXEC message) and build the pHba->detail description string
 * ("Vendor: ... Model: ... FW: ...") from the response.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];		/* 17 words covers the largest (64-bit SGE) layout */
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;			/* standard INQUIRY response length */
	direction = 0x00000000;
	scsidir =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;		/* allocation length */
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/*
		 * NOTE(review): on timeout/interrupt the firmware may still
		 * DMA into buf, so it is deliberately leaked rather than
		 * freed — confirm before "fixing".
		 */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		/* Splice vendor/model/firmware fields out of the INQUIRY data */
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
408
409
410static int adpt_slave_configure(struct scsi_device * device)
411{
412 struct Scsi_Host *host = device->host;
413 adpt_hba* pHba;
414
415 pHba = (adpt_hba *) host->hostdata[0];
416
417 if (host->can_queue && device->tagged_supported) {
418 scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
419 host->can_queue - 1);
420 } else {
421 scsi_adjust_queue_depth(device, 0, 1);
422 }
423 return 0;
424}
425
/*
 * queuecommand (locked variant, wrapped by DEF_SCSI_QCMD below):
 * validate the command and hand it to the firmware via
 * adpt_scsi_to_i2o().  Returns 0 on accepted/completed commands,
 * nonzero to ask the midlayer to retry.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();	/* pair with whoever sets DPTI_STATE_RESET before we read it */
	if ((pHba->state) & DPTI_STATE_RESET) {
		/* HBA is resetting: tell the midlayer to back off and retry */
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
485
f281233d
JG
486static DEF_SCSI_QCMD(adpt_queue)
487
1da177e4
LT
488static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
489 sector_t capacity, int geom[])
490{
491 int heads=-1;
492 int sectors=-1;
493 int cylinders=-1;
494
495 // *** First lets set the default geometry ****
496
497 // If the capacity is less than ox2000
498 if (capacity < 0x2000 ) { // floppy
499 heads = 18;
500 sectors = 2;
501 }
502 // else if between 0x2000 and 0x20000
503 else if (capacity < 0x20000) {
504 heads = 64;
505 sectors = 32;
506 }
507 // else if between 0x20000 and 0x40000
508 else if (capacity < 0x40000) {
509 heads = 65;
510 sectors = 63;
511 }
512 // else if between 0x4000 and 0x80000
513 else if (capacity < 0x80000) {
514 heads = 128;
515 sectors = 63;
516 }
517 // else if greater than 0x80000
518 else {
519 heads = 255;
520 sectors = 63;
521 }
522 cylinders = sector_div(capacity, heads * sectors);
523
524 // Special case if CDROM
525 if(sdev->type == 5) { // CDROM
526 heads = 252;
527 sectors = 63;
528 cylinders = 1111;
529 }
530
531 geom[0] = heads;
532 geom[1] = sectors;
533 geom[2] = cylinders;
534
535 PDEBUG("adpt_bios_param: exit\n");
536 return 0;
537}
538
539
540static const char *adpt_info(struct Scsi_Host *host)
541{
542 adpt_hba* pHba;
543
544 pHba = (adpt_hba *) host->hostdata[0];
545 return (char *) (pHba->detail);
546}
547
/*
 * seq_file backend for the host's /proc entry: print driver version,
 * adapter details, fifo sizes and a per-channel/target device list.
 */
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_printf(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			/* walk the per-target lun chain */
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
594
62ac5aed
MS
/*
 * Turn a struct scsi_cmnd * into a unique 32 bit 'context' that is
 * carried through the firmware and mapped back by
 * adpt_cmd_from_context().
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
602
/*
 * Go from a u32 'context' to a struct scsi_cmnd * by scanning every
 * device's command list for a matching serial_number.
 * This could probably be made more efficient.
 *
 * Caller holds the host lock; it is dropped around the device walk
 * (shost_for_each_device can sleep) and retaken before returning.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				/* drop the iterator's device reference */
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
634
/*
 * Turn a pointer to ioctl reply data into an u32 'context'.
 * On 32-bit kernels the pointer itself fits; on 64-bit kernels a free
 * slot index in pHba->ioctl_reply_context is used instead.  Returns
 * (u32)-1 (and frees 'reply') when all slots are in use.
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
665
/*
 * Go from an u32 'context' to a pointer to ioctl reply data.
 * On 64-bit kernels this also releases the slot claimed by
 * adpt_ioctl_to_context().
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
1da177e4
LT
680
681/*===========================================================================
682 * Error Handling routines
683 *===========================================================================
684 */
685
/*
 * eh abort handler: ask the firmware to abort one outstanding command
 * via an I2O_CMD_SCSI_ABORT message identified by the command context.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	/* never issued to the hardware — nothing to abort */
	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
725
726
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/*
 * eh device reset handler: send an I2O device-reset message to the
 * target's TID, flagging the device DPTI_DEV_RESET for the duration.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;	/* defer new commands while resetting */
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
771
772
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/*
 * Send an I2O bus-reset message to the channel's bus adapter TID.
 */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
801
802// This version of reset is called by the eh_error_handler
df0ae249 803static int __adpt_reset(struct scsi_cmnd* cmd)
1da177e4
LT
804{
805 adpt_hba* pHba;
806 int rcode;
807 pHba = (adpt_hba*)cmd->device->host->hostdata[0];
808 printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
809 rcode = adpt_hba_reset(pHba);
810 if(rcode == 0){
811 printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
812 return SUCCESS;
813 } else {
814 printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
815 return FAILED;
816 }
817}
818
df0ae249
JG
/* eh host reset handler: run __adpt_reset() under the host lock. */
static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
829
1da177e4
LT
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
/*
 * Re-run the full I2O bring-up sequence on one HBA (activate,
 * rebuild system table, online, re-read and re-parse the LCT).
 * DPTI_STATE_RESET is held for the duration so queuecommand backs
 * off; on any failure the HBA is deleted and the error returned.
 */
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	/* complete commands that were in flight across the reset */
	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
870
871/*===========================================================================
872 *
873 *===========================================================================
874 */
875
876
/*
 * Module/global shutdown: delete every HBA on the chain and free any
 * leftover synchronous post-wait bookkeeping nodes.
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
908
/*
 * Set up one PCI board: enable the device, choose the DMA mask,
 * map its BAR(s), allocate and chain a new adpt_hba, compute the
 * message-unit register addresses and install the interrupt handler.
 * Returns 0 on success or a negative errno; on success the HBA is
 * left in DPTI_STATE_RESET awaiting adpt_detect()'s bring-up.
 */
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 * "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		/* single-BAR boards post messages through BAR0 */
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	/* append to the global HBA chain */
	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	/* message-unit register offsets within BAR0 */
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
1078
1079
/*
 * adpt_i2o_delete_hba - tear down one adapter instance.
 *
 * Unlinks pHba from the global hba_chain (under adpt_configuration_lock),
 * frees the IRQ if a SCSI host was registered, unmaps the BAR mappings,
 * releases the PCI regions, frees every DMA-coherent buffer (HRT, LCT,
 * status block, reply pool), the per-channel/per-id adpt_device lun
 * chains, the i2o_device list, and finally the adpt_hba itself.  When
 * the last adapter goes away the char device and sysfs class are also
 * unregistered.
 *
 * NOTE(review): teardown order matters - the IRQ is freed before any
 * buffer the ISR could touch; keep it that way.
 */
1080static void adpt_i2o_delete_hba(adpt_hba* pHba)
1081{
1082	adpt_hba* p1;
1083	adpt_hba* p2;
1084	struct i2o_device* d;
1085	struct i2o_device* next;
1086	int i;
1087	int j;
1088	struct adpt_device* pDev;
1089	struct adpt_device* pNext;
1090
1091
0b950672 1092	mutex_lock(&adpt_configuration_lock);
24601bbc
AM
1093	// scsi_unregister calls our adpt_release which
1094	// does a quiese
1da177e4
LT
1095	if(pHba->host){
1096		free_irq(pHba->host->irq, pHba);
1097	}
1da177e4
LT
1098	p2 = NULL;
	/* Unlink pHba from the singly linked global adapter chain. */
1099	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1100		if(p1 == pHba) {
1101			if(p2) {
1102				p2->next = p1->next;
1103			} else {
1104				hba_chain = p1->next;
1105			}
1106			break;
1107		}
1108	}
1109
1110	hba_count--;
0b950672 1111	mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1112
1113	iounmap(pHba->base_addr_virt);
9c472dd9 1114	pci_release_regions(pHba->pDev);
1da177e4
LT
	/* BAR1 has its own mapping only on Raptor-style boards. */
1115	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1116		iounmap(pHba->msg_addr_virt);
1117	}
62ac5aed
MS
1118	if(pHba->FwDebugBuffer_P)
1119		iounmap(pHba->FwDebugBuffer_P);
67af2b06
MS
	/* Release all DMA-coherent allocations made during init. */
1120	if(pHba->hrt) {
1121		dma_free_coherent(&pHba->pDev->dev,
1122			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1123			pHba->hrt, pHba->hrt_pa);
1124	}
1125	if(pHba->lct) {
1126		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1127			pHba->lct, pHba->lct_pa);
1128	}
1129	if(pHba->status_block) {
1130		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1131			pHba->status_block, pHba->status_block_pa);
1132	}
1133	if(pHba->reply_pool) {
1134		dma_free_coherent(&pHba->pDev->dev,
1135			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1136			pHba->reply_pool, pHba->reply_pool_pa);
1137	}
1da177e4
LT
1138
1139	for(d = pHba->devices; d ; d = next){
1140		next = d->next;
1141		kfree(d);
1142	}
	/* Free every lun chain hanging off channel[i].device[j]. */
1143	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1144		for(j = 0; j < MAX_ID; j++){
1145			if(pHba->channel[i].device[j] != NULL){
1146				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1147					pNext = pDev->next_lun;
1148					kfree(pDev);
1149				}
1150			}
1151		}
1152	}
a07f3537 1153	pci_dev_put(pHba->pDev);
1ed43910
MS
1154	if (adpt_sysfs_class)
1155		device_destroy(adpt_sysfs_class,
1156				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
229bab6b 1157	kfree(pHba);
1ed43910 1158
1da177e4
LT
	/* Last adapter gone: drop the char device and the sysfs class. */
1159	if(hba_count <= 0){
1160		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1ed43910
MS
1161		if (adpt_sysfs_class) {
1162			class_destroy(adpt_sysfs_class);
1163			adpt_sysfs_class = NULL;
1164		}
1da177e4
LT
1165	}
1166}
1167
1da177e4
LT
1168static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
1169{
1170 struct adpt_device* d;
1171
1172 if(chan < 0 || chan >= MAX_CHANNEL)
1173 return NULL;
1174
1175 if( pHba->channel[chan].device == NULL){
1176 printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1177 return NULL;
1178 }
1179
1180 d = pHba->channel[chan].device[id];
1181 if(!d || d->tid == 0) {
1182 return NULL;
1183 }
1184
1185 /* If it is the only lun at that address then this should match*/
1186 if(d->scsi_lun == lun){
1187 return d;
1188 }
1189
1190 /* else we need to look through all the luns */
1191 for(d=d->next_lun ; d ; d = d->next_lun){
1192 if(d->scsi_lun == lun){
1193 return d;
1194 }
1195 }
1196 return NULL;
1197}
1198
1199
/*
 * adpt_i2o_post_wait - post an I2O message and sleep until the reply
 * arrives or the timeout expires.
 * @pHba:    adapter to post to
 * @msg:     message frame; msg[2] is tagged with 0x80000000 | id so the
 *           ISR path can route the reply to adpt_i2o_post_wait_complete()
 * @len:     message length in bytes
 * @timeout: seconds to wait; 0 means wait indefinitely
 *
 * Returns 0 on success, -ETIMEDOUT if no inbound frame was available,
 * -ETIME if the reply did not arrive in time, -EOPNOTSUPP if the IOP
 * rejected the function, -ENOMEM on allocation failure.
 *
 * NOTE(review): on the -ETIMEDOUT path wait_data is deliberately NOT
 * freed (see comment below) - the IOP may still write to the node.
 */
1200static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1201{
1202	// I used my own version of the WAIT_QUEUE_HEAD
1203	// to handle some version differences
1204	// When embedded in the kernel this could go back to the vanilla one
1205	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1206	int status = 0;
1207	ulong flags = 0;
1208	struct adpt_i2o_post_wait_data *p1, *p2;
1209	struct adpt_i2o_post_wait_data *wait_data =
da2907ff 1210		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
4452ea50 1211	DECLARE_WAITQUEUE(wait, current);
1da177e4 1212
4452ea50 1213	if (!wait_data)
1da177e4 1214		return -ENOMEM;
4452ea50 1215
1da177e4
LT
1216	/*
1217	 * The spin locking is needed to keep anyone from playing
1218	 * with the queue pointers and id while we do the same
1219	 */
1220	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1221	// TODO we need a MORE unique way of getting ids
1222	// to support async LCT get
1223	wait_data->next = adpt_post_wait_queue;
1224	adpt_post_wait_queue = wait_data;
	/* ids are 15-bit so they fit beside the 0x8000 wait flag. */
1225	adpt_post_wait_id++;
1226	adpt_post_wait_id &= 0x7fff;
1227	wait_data->id =  adpt_post_wait_id;
1228	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1229
1230	wait_data->wq = &adpt_wq_i2o_post;
1231	wait_data->status = -ETIMEDOUT;
1232
	/* Register on the wait queue BEFORE posting, so a fast reply
	 * cannot be lost between post and sleep. */
4452ea50 1233	add_wait_queue(&adpt_wq_i2o_post, &wait);
1da177e4
LT
1234
1235	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1236	timeout *= HZ;
1237	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1238		set_current_state(TASK_INTERRUPTIBLE);
1239		if(pHba->host)
1240			spin_unlock_irq(pHba->host->host_lock);
1241		if (!timeout)
1242			schedule();
1243		else{
1244			timeout = schedule_timeout(timeout);
1245			if (timeout == 0) {
1246				// I/O issued, but cannot get result in
1247				// specified time. Freeing resorces is
1248				// dangerous.
1249				status = -ETIME;
1250			}
1251		}
1252		if(pHba->host)
1253			spin_lock_irq(pHba->host->host_lock);
1254	}
4452ea50 1255	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1da177e4
LT
1256
	/* -ETIMEDOUT here means the post itself never went out; leave
	 * wait_data on the queue (intentional leak, see comment). */
1257	if(status == -ETIMEDOUT){
1258		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1259		// We will have to free the wait_data memory during shutdown
1260		return status;
1261	}
1262
1263	/* Remove the entry from the queue. */
1264	p2 = NULL;
1265	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1266	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1267		if(p1 == wait_data) {
1268			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1269				status = -EOPNOTSUPP;
1270			}
1271			if(p2) {
1272				p2->next = p1->next;
1273			} else {
1274				adpt_post_wait_queue = p1->next;
1275			}
1276			break;
1277		}
1278	}
1279	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1280
1281	kfree(wait_data);
1282
1283	return status;
1284}
1285
1286
/*
 * adpt_i2o_post_this - claim a free inbound message frame and post one
 * message to the IOP.
 * @pHba: adapter
 * @data: message payload in host memory
 * @len:  payload length in bytes
 *
 * Polls the post port for up to 30 seconds for a free frame offset,
 * copies the message into the frame and writes the offset back to the
 * post port to hand it to the firmware.  Returns 0 or -ETIMEDOUT.
 */
1287static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1288{
1289
1290	u32 m = EMPTY_QUEUE;
1291	u32 __iomem *msg;
1292	ulong timeout = jiffies + 30*HZ;
	/* Reading the post port returns EMPTY_QUEUE until the IOP has a
	 * free inbound frame; the returned value is the frame offset. */
1293	do {
1294		rmb();
1295		m = readl(pHba->post_port);
1296		if (m != EMPTY_QUEUE) {
1297			break;
1298		}
1299		if(time_after(jiffies,timeout)){
1300			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1301			return -ETIMEDOUT;
1302		}
a9a3047d 1303		schedule_timeout_uninterruptible(1);
1da177e4
LT
1304	} while(m == EMPTY_QUEUE);
1305
1306	msg = pHba->msg_addr_virt + m;
1307	memcpy_toio(msg, data, len);
	/* Barrier: the frame contents must be visible before the post. */
1308	wmb();
1309
1310	//post message
1311	writel(m, pHba->post_port);
1312	wmb();
1313
1314	return 0;
1315}
1316
1317
1318static void adpt_i2o_post_wait_complete(u32 context, int status)
1319{
1320 struct adpt_i2o_post_wait_data *p1 = NULL;
1321 /*
1322 * We need to search through the adpt_post_wait
1323 * queue to see if the given message is still
1324 * outstanding. If not, it means that the IOP
1325 * took longer to respond to the message than we
1326 * had allowed and timer has already expired.
1327 * Not much we can do about that except log
1328 * it for debug purposes, increase timeout, and recompile
1329 *
1330 * Lock needed to keep anyone from moving queue pointers
1331 * around while we're looking through them.
1332 */
1333
1334 context &= 0x7fff;
1335
1336 spin_lock(&adpt_post_wait_lock);
1337 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1338 if(p1->id == context) {
1339 p1->status = status;
1340 spin_unlock(&adpt_post_wait_lock);
1341 wake_up_interruptible(p1->wq);
1342 return;
1343 }
1344 }
1345 spin_unlock(&adpt_post_wait_lock);
1346 // If this happens we lose commands that probably really completed
1347 printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1348 printk(KERN_DEBUG" Tasks in wait queue:\n");
1349 for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1350 printk(KERN_DEBUG" %d\n",p1->id);
1351 }
1352 return;
1353}
1354
/*
 * adpt_i2o_reset_hba - issue an I2O ADAPTER_RESET to the IOP and wait
 * for it to complete.
 *
 * Quiesces the controller first (unless this is the initial reset),
 * grabs an inbound frame, posts the reset message with a 4-byte
 * DMA-coherent status cell, and polls that cell until the firmware
 * writes a result.  Returns 0, -ETIMEDOUT or -ENOMEM.
 *
 * NOTE(review): on the timeout paths the 4-byte status buffer is
 * deliberately leaked - the controller may wake later and DMA into it,
 * so freeing would corrupt reused memory (see inline comments).
 */
1355static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1356{
1357	u32 msg[8];
1358	u8* status;
67af2b06 1359	dma_addr_t addr;
1da177e4
LT
1360	u32 m = EMPTY_QUEUE ;
1361	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1362
1363	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1364		timeout = jiffies + (25*HZ);
1365	} else {
1366		adpt_i2o_quiesce_hba(pHba);
1367	}
1368
	/* Wait for a free inbound message frame offset. */
1369	do {
1370		rmb();
1371		m = readl(pHba->post_port);
1372		if (m != EMPTY_QUEUE) {
1373			break;
1374		}
1375		if(time_after(jiffies,timeout)){
1376			printk(KERN_WARNING"Timeout waiting for message!\n");
1377			return -ETIMEDOUT;
1378		}
a9a3047d 1379		schedule_timeout_uninterruptible(1);
1da177e4
LT
1380	} while (m == EMPTY_QUEUE);
1381
	/* 4-byte cell the firmware DMAs the reset result into. */
67af2b06 1382	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1da177e4
LT
1383	if(status == NULL) {
1384		adpt_send_nop(pHba, m);
1385		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1386		return -ENOMEM;
1387	}
67af2b06 1388	memset(status,0,4);
1da177e4
LT
1389
1390	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1391	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1392	msg[2]=0;
1393	msg[3]=0;
1394	msg[4]=0;
1395	msg[5]=0;
67af2b06
MS
1396	msg[6]=dma_low(addr);
1397	msg[7]=dma_high(addr);
1da177e4
LT
1398
1399	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1400	wmb();
1401	writel(m, pHba->post_port);
1402	wmb();
1403
	/* Poll the status cell until the firmware writes a result. */
1404	while(*status == 0){
1405		if(time_after(jiffies,timeout)){
1406			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
67af2b06
MS
1407			/* We lose 4 bytes of "status" here, but we cannot
1408			   free these because controller may awake and corrupt
1409			   those bytes at any time */
1410			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1da177e4
LT
1411			return -ETIMEDOUT;
1412		}
1413		rmb();
a9a3047d 1414		schedule_timeout_uninterruptible(1);
1da177e4
LT
1415	}
1416
1417	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1418		PDEBUG("%s: Reset in progress...\n", pHba->name);
1419		// Here we wait for message frame to become available
1420		// indicated that reset has finished
1421		do {
1422			rmb();
1423			m = readl(pHba->post_port);
1424			if (m != EMPTY_QUEUE) {
1425				break;
1426			}
1427			if(time_after(jiffies,timeout)){
1428				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
67af2b06
MS
1429				/* We lose 4 bytes of "status" here, but we
1430				   cannot free these because controller may
1431				   awake and corrupt those bytes at any time */
1432				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1da177e4
LT
1433				return -ETIMEDOUT;
1434			}
a9a3047d 1435			schedule_timeout_uninterruptible(1);
1da177e4
LT
1436		} while (m == EMPTY_QUEUE);
1437		// Flush the offset
1438		adpt_send_nop(pHba, m);
1439	}
1440	adpt_i2o_status_get(pHba);
1441	if(*status == 0x02 ||
1442			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1443		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1444				pHba->name);
1445	} else {
1446		PDEBUG("%s: Reset completed.\n", pHba->name);
1447	}
1448
67af2b06 1449	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1da177e4
LT
1450#ifdef UARTDELAY
1451	// This delay is to allow someone attached to the card through the debug UART to
1452	// set up the dump levels that they want before the rest of the initialization sequence
1453	adpt_delay(20000);
1454#endif
1455	return 0;
1456}
1457
1458
1459static int adpt_i2o_parse_lct(adpt_hba* pHba)
1460{
1461 int i;
1462 int max;
1463 int tid;
1464 struct i2o_device *d;
1465 i2o_lct *lct = pHba->lct;
1466 u8 bus_no = 0;
1467 s16 scsi_id;
1468 s16 scsi_lun;
1469 u32 buf[10]; // larger than 7, or 8 ...
1470 struct adpt_device* pDev;
1471
1472 if (lct == NULL) {
1473 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1474 return -1;
1475 }
1476
1477 max = lct->table_size;
1478 max -= 3;
1479 max /= 9;
1480
1481 for(i=0;i<max;i++) {
1482 if( lct->lct_entry[i].user_tid != 0xfff){
1483 /*
1484 * If we have hidden devices, we need to inform the upper layers about
1485 * the possible maximum id reference to handle device access when
1486 * an array is disassembled. This code has no other purpose but to
1487 * allow us future access to devices that are currently hidden
1488 * behind arrays, hotspares or have not been configured (JBOD mode).
1489 */
1490 if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1491 lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1492 lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1493 continue;
1494 }
1495 tid = lct->lct_entry[i].tid;
1496 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1497 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1498 continue;
1499 }
1500 bus_no = buf[0]>>16;
1501 scsi_id = buf[1];
1502 scsi_lun = (buf[2]>>8 )&0xff;
1503 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1504 printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1505 continue;
1506 }
1507 if (scsi_id >= MAX_ID){
1508 printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1509 continue;
1510 }
1511 if(bus_no > pHba->top_scsi_channel){
1512 pHba->top_scsi_channel = bus_no;
1513 }
1514 if(scsi_id > pHba->top_scsi_id){
1515 pHba->top_scsi_id = scsi_id;
1516 }
1517 if(scsi_lun > pHba->top_scsi_lun){
1518 pHba->top_scsi_lun = scsi_lun;
1519 }
1520 continue;
1521 }
5cbded58 1522 d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1da177e4
LT
1523 if(d==NULL)
1524 {
1525 printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1526 return -ENOMEM;
1527 }
1528
1c2fb3f3 1529 d->controller = pHba;
1da177e4
LT
1530 d->next = NULL;
1531
1532 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1533
1534 d->flags = 0;
1535 tid = d->lct_data.tid;
1536 adpt_i2o_report_hba_unit(pHba, d);
1537 adpt_i2o_install_device(pHba, d);
1538 }
1539 bus_no = 0;
1540 for(d = pHba->devices; d ; d = d->next) {
1541 if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
1542 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
1543 tid = d->lct_data.tid;
1544 // TODO get the bus_no from hrt-but for now they are in order
1545 //bus_no =
1546 if(bus_no > pHba->top_scsi_channel){
1547 pHba->top_scsi_channel = bus_no;
1548 }
1549 pHba->channel[bus_no].type = d->lct_data.class_id;
1550 pHba->channel[bus_no].tid = tid;
1551 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1552 {
1553 pHba->channel[bus_no].scsi_id = buf[1];
1554 PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1555 }
1556 // TODO remove - this is just until we get from hrt
1557 bus_no++;
1558 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1559 printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1560 break;
1561 }
1562 }
1563 }
1564
1565 // Setup adpt_device table
1566 for(d = pHba->devices; d ; d = d->next) {
1567 if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1568 d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
1569 d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1570
1571 tid = d->lct_data.tid;
1572 scsi_id = -1;
1573 // I2O_DPT_DEVICE_INFO_GROUP_NO;
1574 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1575 bus_no = buf[0]>>16;
1576 scsi_id = buf[1];
1577 scsi_lun = (buf[2]>>8 )&0xff;
1578 if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
1579 continue;
1580 }
1581 if (scsi_id >= MAX_ID) {
1582 continue;
1583 }
1584 if( pHba->channel[bus_no].device[scsi_id] == NULL){
ab552204 1585 pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1da177e4
LT
1586 if(pDev == NULL) {
1587 return -ENOMEM;
1588 }
1589 pHba->channel[bus_no].device[scsi_id] = pDev;
1da177e4
LT
1590 } else {
1591 for( pDev = pHba->channel[bus_no].device[scsi_id];
1592 pDev->next_lun; pDev = pDev->next_lun){
1593 }
ab552204 1594 pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1da177e4
LT
1595 if(pDev->next_lun == NULL) {
1596 return -ENOMEM;
1597 }
1da177e4
LT
1598 pDev = pDev->next_lun;
1599 }
1600 pDev->tid = tid;
1601 pDev->scsi_channel = bus_no;
1602 pDev->scsi_id = scsi_id;
1603 pDev->scsi_lun = scsi_lun;
1604 pDev->pI2o_dev = d;
1605 d->owner = pDev;
1606 pDev->type = (buf[0])&0xff;
1607 pDev->flags = (buf[0]>>8)&0xff;
1608 if(scsi_id > pHba->top_scsi_id){
1609 pHba->top_scsi_id = scsi_id;
1610 }
1611 if(scsi_lun > pHba->top_scsi_lun){
1612 pHba->top_scsi_lun = scsi_lun;
1613 }
1614 }
1615 if(scsi_id == -1){
1616 printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1617 d->lct_data.identity_tag);
1618 }
1619 }
1620 }
1621 return 0;
1622}
1623
1624
1625/*
1626 * Each I2O controller has a chain of devices on it - these match
1627 * the useful parts of the LCT of the board.
1628 */
1629
1630static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1631{
0b950672 1632 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1633 d->controller=pHba;
1634 d->owner=NULL;
1635 d->next=pHba->devices;
1636 d->prev=NULL;
1637 if (pHba->devices != NULL){
1638 pHba->devices->prev=d;
1639 }
1640 pHba->devices=d;
1641 *d->dev_name = 0;
1642
0b950672 1643 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1644 return 0;
1645}
1646
1647static int adpt_open(struct inode *inode, struct file *file)
1648{
1649 int minor;
1650 adpt_hba* pHba;
1651
c45d15d2 1652 mutex_lock(&adpt_mutex);
1da177e4
LT
1653 //TODO check for root access
1654 //
1655 minor = iminor(inode);
1656 if (minor >= hba_count) {
c45d15d2 1657 mutex_unlock(&adpt_mutex);
1da177e4
LT
1658 return -ENXIO;
1659 }
0b950672 1660 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1661 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1662 if (pHba->unit == minor) {
1663 break; /* found adapter */
1664 }
1665 }
1666 if (pHba == NULL) {
0b950672 1667 mutex_unlock(&adpt_configuration_lock);
c45d15d2 1668 mutex_unlock(&adpt_mutex);
1da177e4
LT
1669 return -ENXIO;
1670 }
1671
1672// if(pHba->in_use){
0b950672 1673 // mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1674// return -EBUSY;
1675// }
1676
1677 pHba->in_use = 1;
0b950672 1678 mutex_unlock(&adpt_configuration_lock);
c45d15d2 1679 mutex_unlock(&adpt_mutex);
1da177e4
LT
1680
1681 return 0;
1682}
1683
1684static int adpt_close(struct inode *inode, struct file *file)
1685{
1686 int minor;
1687 adpt_hba* pHba;
1688
1689 minor = iminor(inode);
1690 if (minor >= hba_count) {
1691 return -ENXIO;
1692 }
0b950672 1693 mutex_lock(&adpt_configuration_lock);
1da177e4
LT
1694 for (pHba = hba_chain; pHba; pHba = pHba->next) {
1695 if (pHba->unit == minor) {
1696 break; /* found adapter */
1697 }
1698 }
0b950672 1699 mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
1700 if (pHba == NULL) {
1701 return -ENXIO;
1702 }
1703
1704 pHba->in_use = 0;
1705
1706 return 0;
1707}
1708
1709
1710static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1711{
1712 u32 msg[MAX_MESSAGE_SIZE];
1713 u32* reply = NULL;
1714 u32 size = 0;
1715 u32 reply_size = 0;
1716 u32 __user *user_msg = arg;
1717 u32 __user * user_reply = NULL;
1718 void *sg_list[pHba->sg_tablesize];
1719 u32 sg_offset = 0;
1720 u32 sg_count = 0;
1721 int sg_index = 0;
1722 u32 i = 0;
1723 u32 rcode = 0;
1724 void *p = NULL;
67af2b06 1725 dma_addr_t addr;
1da177e4
LT
1726 ulong flags = 0;
1727
1728 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1729 // get user msg size in u32s
1730 if(get_user(size, &user_msg[0])){
1731 return -EFAULT;
1732 }
1733 size = size>>16;
1734
1735 user_reply = &user_msg[size];
1736 if(size > MAX_MESSAGE_SIZE){
1737 return -EFAULT;
1738 }
1739 size *= 4; // Convert to bytes
1740
1741 /* Copy in the user's I2O command */
1742 if(copy_from_user(msg, user_msg, size)) {
1743 return -EFAULT;
1744 }
1745 get_user(reply_size, &user_reply[0]);
1746 reply_size = reply_size>>16;
1747 if(reply_size > REPLY_FRAME_SIZE){
1748 reply_size = REPLY_FRAME_SIZE;
1749 }
1750 reply_size *= 4;
ab552204 1751 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1da177e4
LT
1752 if(reply == NULL) {
1753 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1754 return -ENOMEM;
1755 }
1da177e4
LT
1756 sg_offset = (msg[0]>>4)&0xf;
1757 msg[2] = 0x40000000; // IOCTL context
62ac5aed
MS
1758 msg[3] = adpt_ioctl_to_context(pHba, reply);
1759 if (msg[3] == (u32)-1)
1760 return -EBUSY;
1761
1da177e4
LT
1762 memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1763 if(sg_offset) {
62ac5aed 1764 // TODO add 64 bit API
1da177e4
LT
1765 struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
1766 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1767 if (sg_count > pHba->sg_tablesize){
1768 printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1769 kfree (reply);
1770 return -EINVAL;
1771 }
1772
1773 for(i = 0; i < sg_count; i++) {
1774 int sg_size;
1775
1776 if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1777 printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
1778 rcode = -EINVAL;
1779 goto cleanup;
1780 }
1781 sg_size = sg[i].flag_count & 0xffffff;
1782 /* Allocate memory for the transfer */
67af2b06 1783 p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1da177e4
LT
1784 if(!p) {
1785 printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1786 pHba->name,sg_size,i,sg_count);
1787 rcode = -ENOMEM;
1788 goto cleanup;
1789 }
1790 sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1791 /* Copy in the user's SG buffer if necessary */
1792 if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
62ac5aed
MS
1793 // sg_simple_element API is 32 bit
1794 if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1da177e4
LT
1795 printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1796 rcode = -EFAULT;
1797 goto cleanup;
1798 }
1799 }
62ac5aed
MS
1800 /* sg_simple_element API is 32 bit, but addr < 4GB */
1801 sg[i].addr_bus = addr;
1da177e4
LT
1802 }
1803 }
1804
1805 do {
1f8c88c3
HR
1806 /*
1807 * Stop any new commands from enterring the
1808 * controller while processing the ioctl
1809 */
1810 if (pHba->host) {
1811 scsi_block_requests(pHba->host);
1da177e4 1812 spin_lock_irqsave(pHba->host->host_lock, flags);
1f8c88c3 1813 }
1da177e4
LT
1814 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1815 if (rcode != 0)
1816 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1817 rcode, reply);
1f8c88c3 1818 if (pHba->host) {
1da177e4 1819 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1f8c88c3
HR
1820 scsi_unblock_requests(pHba->host);
1821 }
1822 } while (rcode == -ETIMEDOUT);
1da177e4
LT
1823
1824 if(rcode){
1825 goto cleanup;
1826 }
1827
1828 if(sg_offset) {
1829 /* Copy back the Scatter Gather buffers back to user space */
1830 u32 j;
62ac5aed 1831 // TODO add 64 bit API
1da177e4
LT
1832 struct sg_simple_element* sg;
1833 int sg_size;
1834
1835 // re-acquire the original message to handle correctly the sg copy operation
1836 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1837 // get user msg size in u32s
1838 if(get_user(size, &user_msg[0])){
1839 rcode = -EFAULT;
1840 goto cleanup;
1841 }
1842 size = size>>16;
1843 size *= 4;
ef7562b7 1844 if (size > MAX_MESSAGE_SIZE) {
aefba418 1845 rcode = -EINVAL;
ef7562b7
AC
1846 goto cleanup;
1847 }
1da177e4
LT
1848 /* Copy in the user's I2O command */
1849 if (copy_from_user (msg, user_msg, size)) {
1850 rcode = -EFAULT;
1851 goto cleanup;
1852 }
1853 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1854
62ac5aed 1855 // TODO add 64 bit API
1da177e4
LT
1856 sg = (struct sg_simple_element*)(msg + sg_offset);
1857 for (j = 0; j < sg_count; j++) {
1858 /* Copy out the SG list to user's buffer if necessary */
1859 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1860 sg_size = sg[j].flag_count & 0xffffff;
62ac5aed
MS
1861 // sg_simple_element API is 32 bit
1862 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1da177e4
LT
1863 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1864 rcode = -EFAULT;
1865 goto cleanup;
1866 }
1867 }
1868 }
1869 }
1870
1871 /* Copy back the reply to user space */
1872 if (reply_size) {
1873 // we wrote our own values for context - now restore the user supplied ones
1874 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1875 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1876 rcode = -EFAULT;
1877 }
1878 if(copy_to_user(user_reply, reply, reply_size)) {
1879 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1880 rcode = -EFAULT;
1881 }
1882 }
1883
1884
1885cleanup:
67af2b06
MS
1886 if (rcode != -ETIME && rcode != -EINTR) {
1887 struct sg_simple_element *sg =
1888 (struct sg_simple_element*) (msg +sg_offset);
1da177e4 1889 kfree (reply);
67af2b06
MS
1890 while(sg_index) {
1891 if(sg_list[--sg_index]) {
1892 dma_free_coherent(&pHba->pDev->dev,
1893 sg[sg_index].flag_count & 0xffffff,
1894 sg_list[sg_index],
1895 sg[sg_index].addr_bus);
1896 }
1da177e4
LT
1897 }
1898 }
1899 return rcode;
1900}
1901
1da177e4
LT
1902#if defined __ia64__
/* Report the IA-64 processor code to the DPT management tools. */
1903static void adpt_ia64_info(sysInfo_S* si)
1904{
1905	// This is all the info we need for now
1906	// We will add more info as our new
1907	// management utility requires it
1908	si->processorType = PROC_IA64;
1909}
1910#endif
1911
1da177e4
LT
1912#if defined __sparc__
/* Report the UltraSPARC processor code to the DPT management tools. */
1913static void adpt_sparc_info(sysInfo_S* si)
1914{
1915	// This is all the info we need for now
1916	// We will add more info as our new
1917	// management utility requires it
1918	si->processorType = PROC_ULTRASPARC;
1919}
1920#endif
1da177e4
LT
1921#if defined __alpha__
/* Report the Alpha processor code to the DPT management tools. */
1922static void adpt_alpha_info(sysInfo_S* si)
1923{
1924	// This is all the info we need for now
1925	// We will add more info as our new
1926	// management utility requires it
1927	si->processorType = PROC_ALPHA;
1928}
1929#endif
1930
#if defined __i386__
/*
 * Map the running x86 family to the DPT management tool's processor
 * codes.  Families newer than the 586 are reported as PROC_PENTIUM,
 * matching the original default case.
 */
static void adpt_i386_info(sysInfo_S* si)
{
	if (boot_cpu_data.x86 == CPU_386)
		si->processorType = PROC_386;
	else if (boot_cpu_data.x86 == CPU_486)
		si->processorType = PROC_486;
	else
		si->processorType = PROC_PENTIUM;	/* 586 and anything else */
}
#endif
1953
1954/*
1955 * This routine returns information about the system. This does not effect
1956 * any logic and if the info is wrong - it doesn't matter.
1957 */
1da177e4 1958
8b2cc917
AM
1959/* Get all the info we can not get from kernel services */
/*
 * adpt_system_info - fill a sysInfo_S with host OS/CPU identification
 * and copy it to the user buffer (DPT_SYSINFO ioctl).
 * @buffer: user-space destination for the sysInfo_S structure
 *
 * Returns 0 on success, -EFAULT if the copy to user space fails.
 * The values only inform the management utility; inaccuracies here do
 * not affect driver logic.
 */
1960static int adpt_system_info(void __user *buffer)
1961{
1962	sysInfo_S si;
1963
1964	memset(&si, 0, sizeof(si));
1965
1966	si.osType = OS_LINUX;
1967	si.osMajorVersion = 0;
1968	si.osMinorVersion = 0;
1969	si.osRevision = 0;
1970	si.busType = SI_PCI_BUS;
1971	si.processorFamily = DPTI_sig.dsProcessorFamily;
1972
	/* Per-architecture processor code; 0xff when unrecognized. */
1973#if defined __i386__
1974	adpt_i386_info(&si);
1975#elif defined (__ia64__)
1976	adpt_ia64_info(&si);
1977#elif defined(__sparc__)
1978	adpt_sparc_info(&si);
1979#elif defined (__alpha__)
1980	adpt_alpha_info(&si);
1981#else
1982	si.processorType = 0xff ;
1da177e4 1983#endif
8b2cc917
AM
1984	if (copy_to_user(buffer, &si, sizeof(si))){
1985		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1986		return -EFAULT;
1987	}
1da177e4 1988
8b2cc917
AM
1989	return 0;
1990}
1da177e4 1991
/*
 * adpt_ioctl - legacy ioctl dispatcher for the dpti char device.
 * @inode/@file: char-device context; the minor selects the adapter
 * @cmd:  DPT_* / I2O* ioctl command
 * @arg:  user-space argument pointer
 *
 * Resolves the adapter from the minor number, waits out any reset in
 * progress, then dispatches.  Returns 0 or a negative errno.
 * Serialization is provided by the callers (adpt_unlocked_ioctl /
 * compat_adpt_ioctl hold adpt_mutex).
 */
f4927c45 1992static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1da177e4
LT
1993{
1994	int minor;
1995	int error = 0;
1996	adpt_hba* pHba;
1997	ulong flags = 0;
1998	void __user *argp = (void __user *)arg;
1999
2000	minor = iminor(inode);
2001	if (minor >= DPTI_MAX_HBA){
2002		return -ENXIO;
2003	}
0b950672 2004	mutex_lock(&adpt_configuration_lock);
1da177e4
LT
2005	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2006		if (pHba->unit == minor) {
2007			break;	/* found adapter */
2008		}
2009	}
0b950672 2010	mutex_unlock(&adpt_configuration_lock);
1da177e4
LT
2011	if(pHba == NULL){
2012		return -ENXIO;
2013	}
2014
	/* Sleep (2 jiffies at a time) until any in-progress reset ends.
	 * NOTE(review): the volatile cast is a weak substitute for real
	 * synchronization with the reset path - confirm before changing. */
a9a3047d
NA
2015	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2016		schedule_timeout_uninterruptible(2);
1da177e4
LT
2017
2018	switch (cmd) {
2019	// TODO: handle 3 cases
2020	case DPT_SIGNATURE:
2021		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2022			return -EFAULT;
2023		}
2024		break;
2025	case I2OUSRCMD:
2026		return adpt_i2o_passthru(pHba, argp);
2027
2028	case DPT_CTRLINFO:{
2029		drvrHBAinfo_S HbaInfo;
2030
2031#define FLG_OSD_PCI_VALID 0x0001
2032#define FLG_OSD_DMA	  0x0002
2033#define FLG_OSD_I2O	  0x0004
2034		memset(&HbaInfo, 0, sizeof(HbaInfo));
2035		HbaInfo.drvrHBAnum = pHba->unit;
2036		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2037		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2038		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2039		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2040		HbaInfo.Interrupt = pHba->pDev->irq;
2041		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2042		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2043			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2044			return -EFAULT;
2045		}
2046		break;
2047		}
2048	case DPT_SYSINFO:
2049		return adpt_system_info(argp);
2050	case DPT_BLINKLED:{
2051		u32 value;
2052		value = (u32)adpt_read_blink_led(pHba);
2053		if (copy_to_user(argp, &value, sizeof(value))) {
2054			return -EFAULT;
2055		}
2056		break;
2057		}
	/* Reset is performed under the host lock to fence off the
	 * SCSI midlayer while the controller restarts. */
2058	case I2ORESETCMD:
2059		if(pHba->host)
2060			spin_lock_irqsave(pHba->host->host_lock, flags);
2061		adpt_hba_reset(pHba);
2062		if(pHba->host)
2063			spin_unlock_irqrestore(pHba->host->host_lock, flags);
2064		break;
2065	case I2ORESCANCMD:
2066		adpt_rescan(pHba);
2067		break;
2068	default:
2069		return -EINVAL;
2070	}
2071
2072	return error;
2073}
2074
f4927c45
AB
2075static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2076{
2077 struct inode *inode;
2078 long ret;
2079
496ad9aa 2080 inode = file_inode(file);
f4927c45 2081
c45d15d2 2082 mutex_lock(&adpt_mutex);
f4927c45 2083 ret = adpt_ioctl(inode, file, cmd, arg);
c45d15d2 2084 mutex_unlock(&adpt_mutex);
f4927c45
AB
2085
2086 return ret;
2087}
2088
62ac5aed
MS
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point: accept only the known command set
 * and hand the call to the common handler under adpt_mutex.  Unknown
 * commands yield -ENOIOCTLCMD so the VFS can fall back appropriately.
 */
static long compat_adpt_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long rc = -ENOIOCTLCMD;

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		rc = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		break;
	}

	mutex_unlock(&adpt_mutex);

	return rc;
}
#endif
1da177e4 2121
7d12e780 2122static irqreturn_t adpt_isr(int irq, void *dev_id)
1da177e4
LT
2123{
2124 struct scsi_cmnd* cmd;
2125 adpt_hba* pHba = dev_id;
2126 u32 m;
1c2fb3f3 2127 void __iomem *reply;
1da177e4
LT
2128 u32 status=0;
2129 u32 context;
2130 ulong flags = 0;
2131 int handled = 0;
2132
2133 if (pHba == NULL){
2134 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2135 return IRQ_NONE;
2136 }
2137 if(pHba->host)
2138 spin_lock_irqsave(pHba->host->host_lock, flags);
2139
2140 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2141 m = readl(pHba->reply_port);
2142 if(m == EMPTY_QUEUE){
2143 // Try twice then give up
2144 rmb();
2145 m = readl(pHba->reply_port);
2146 if(m == EMPTY_QUEUE){
2147 // This really should not happen
2148 printk(KERN_ERR"dpti: Could not get reply frame\n");
2149 goto out;
2150 }
2151 }
67af2b06
MS
2152 if (pHba->reply_pool_pa <= m &&
2153 m < pHba->reply_pool_pa +
2154 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2155 reply = (u8 *)pHba->reply_pool +
2156 (m - pHba->reply_pool_pa);
2157 } else {
2158 /* Ick, we should *never* be here */
2159 printk(KERN_ERR "dpti: reply frame not from pool\n");
2160 reply = (u8 *)bus_to_virt(m);
2161 }
1da177e4
LT
2162
2163 if (readl(reply) & MSG_FAIL) {
2164 u32 old_m = readl(reply+28);
1c2fb3f3 2165 void __iomem *msg;
1da177e4
LT
2166 u32 old_context;
2167 PDEBUG("%s: Failed message\n",pHba->name);
2168 if(old_m >= 0x100000){
2169 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2170 writel(m,pHba->reply_port);
2171 continue;
2172 }
2173 // Transaction context is 0 in failed reply frame
1c2fb3f3 2174 msg = pHba->msg_addr_virt + old_m;
1da177e4
LT
2175 old_context = readl(msg+12);
2176 writel(old_context, reply+12);
2177 adpt_send_nop(pHba, old_m);
2178 }
2179 context = readl(reply+8);
2180 if(context & 0x40000000){ // IOCTL
62ac5aed 2181 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
1c2fb3f3
BB
2182 if( p != NULL) {
2183 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
1da177e4
LT
2184 }
2185 // All IOCTLs will also be post wait
2186 }
2187 if(context & 0x80000000){ // Post wait message
2188 status = readl(reply+16);
2189 if(status >> 24){
2190 status &= 0xffff; /* Get detail status */
2191 } else {
2192 status = I2O_POST_WAIT_OK;
2193 }
2194 if(!(context & 0x40000000)) {
62ac5aed
MS
2195 cmd = adpt_cmd_from_context(pHba,
2196 readl(reply+12));
1da177e4
LT
2197 if(cmd != NULL) {
2198 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2199 }
2200 }
2201 adpt_i2o_post_wait_complete(context, status);
2202 } else { // SCSI message
62ac5aed 2203 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
1da177e4 2204 if(cmd != NULL){
67af2b06 2205 scsi_dma_unmap(cmd);
1da177e4
LT
2206 if(cmd->serial_number != 0) { // If not timedout
2207 adpt_i2o_to_scsi(reply, cmd);
2208 }
2209 }
2210 }
2211 writel(m, pHba->reply_port);
2212 wmb();
2213 rmb();
2214 }
2215 handled = 1;
2216out: if(pHba->host)
2217 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2218 return IRQ_RETVAL(handled);
2219}
2220
/*
 * Build an I2O_CMD_SCSI_EXEC message for @cmd addressed to device @d
 * and post it to the controller.  Maps the command's data buffer for
 * DMA and appends a simple-element scatter/gather list (32- or 64-bit
 * addressing depending on dpt_dma64()).  Returns 0 on success or the
 * error from adpt_i2o_post_this().
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note: Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			/* lptr remembers this element so the last one can be
			   rewritten with the end-of-buffer/end-of-list flags */
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
2341
2342
c864cb14 2343static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
24601bbc 2344{
c864cb14 2345 struct Scsi_Host *host;
24601bbc 2346
c864cb14 2347 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
24601bbc 2348 if (host == NULL) {
c864cb14 2349 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
24601bbc
AM
2350 return -1;
2351 }
2352 host->hostdata[0] = (unsigned long)pHba;
2353 pHba->host = host;
2354
2355 host->irq = pHba->pDev->irq;
2356 /* no IO ports, so don't have to set host->io_port and
2357 * host->n_io_port
2358 */
2359 host->io_port = 0;
2360 host->n_io_port = 0;
2361 /* see comments in scsi_host.h */
2362 host->max_id = 16;
2363 host->max_lun = 256;
2364 host->max_channel = pHba->top_scsi_channel + 1;
2365 host->cmd_per_lun = 1;
67af2b06 2366 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
24601bbc
AM
2367 host->sg_tablesize = pHba->sg_tablesize;
2368 host->can_queue = pHba->post_fifo_size;
2369
2370 return 0;
2371}
2372
2373
1c2fb3f3 2374static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
1da177e4
LT
2375{
2376 adpt_hba* pHba;
2377 u32 hba_status;
2378 u32 dev_status;
2379 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2380 // I know this would look cleaner if I just read bytes
2381 // but the model I have been using for all the rest of the
2382 // io is in 4 byte words - so I keep that model
2383 u16 detailed_status = readl(reply+16) &0xffff;
2384 dev_status = (detailed_status & 0xff);
2385 hba_status = detailed_status >> 8;
2386
2387 // calculate resid for sg
df81d237 2388 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
1da177e4
LT
2389
2390 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2391
2392 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2393
2394 if(!(reply_flags & MSG_FAIL)) {
2395 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2396 case I2O_SCSI_DSC_SUCCESS:
2397 cmd->result = (DID_OK << 16);
2398 // handle underflow
df81d237 2399 if (readl(reply+20) < cmd->underflow) {
1da177e4
LT
2400 cmd->result = (DID_ERROR <<16);
2401 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2402 }
2403 break;
2404 case I2O_SCSI_DSC_REQUEST_ABORTED:
2405 cmd->result = (DID_ABORT << 16);
2406 break;
2407 case I2O_SCSI_DSC_PATH_INVALID:
2408 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2409 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2410 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2411 case I2O_SCSI_DSC_NO_ADAPTER:
2412 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2413 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2414 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2415 cmd->result = (DID_TIME_OUT << 16);
2416 break;
2417 case I2O_SCSI_DSC_ADAPTER_BUSY:
2418 case I2O_SCSI_DSC_BUS_BUSY:
2419 cmd->result = (DID_BUS_BUSY << 16);
2420 break;
2421 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2422 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2423 cmd->result = (DID_RESET << 16);
2424 break;
2425 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2426 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2427 cmd->result = (DID_PARITY << 16);
2428 break;
2429 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2430 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2431 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2432 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2433 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2434 case I2O_SCSI_DSC_DATA_OVERRUN:
2435 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2436 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2437 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2438 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2439 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2440 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2441 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2442 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2443 case I2O_SCSI_DSC_INVALID_CDB:
2444 case I2O_SCSI_DSC_LUN_INVALID:
2445 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2446 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2447 case I2O_SCSI_DSC_NO_NEXUS:
2448 case I2O_SCSI_DSC_CDB_RECEIVED:
2449 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2450 case I2O_SCSI_DSC_QUEUE_FROZEN:
2451 case I2O_SCSI_DSC_REQUEST_INVALID:
2452 default:
2453 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2454 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2455 hba_status, dev_status, cmd->cmnd[0]);
2456 cmd->result = (DID_ERROR << 16);
2457 break;
2458 }
2459
2460 // copy over the request sense data if it was a check
2461 // condition status
d814c517 2462 if (dev_status == SAM_STAT_CHECK_CONDITION) {
b80ca4f7 2463 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
1da177e4 2464 // Copy over the sense data
1c2fb3f3 2465 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
1da177e4
LT
2466 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2467 cmd->sense_buffer[2] == DATA_PROTECT ){
2468 /* This is to handle an array failed */
2469 cmd->result = (DID_TIME_OUT << 16);
2470 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2471 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2472 hba_status, dev_status, cmd->cmnd[0]);
2473
2474 }
2475 }
2476 } else {
2477 /* In this condtion we could not talk to the tid
2478 * the card rejected it. We should signal a retry
2479 * for a limitted number of retries.
2480 */
2481 cmd->result = (DID_TIME_OUT << 16);
2482 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2483 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2484 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2485 }
2486
2487 cmd->result |= (dev_status);
2488
2489 if(cmd->scsi_done != NULL){
2490 cmd->scsi_done(cmd);
2491 }
2492 return cmd->result;
2493}
2494
2495
2496static s32 adpt_rescan(adpt_hba* pHba)
2497{
2498 s32 rcode;
2499 ulong flags = 0;
2500
2501 if(pHba->host)
2502 spin_lock_irqsave(pHba->host->host_lock, flags);
2503 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2504 goto out;
2505 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2506 goto out;
2507 rcode = 0;
2508out: if(pHba->host)
2509 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2510 return rcode;
2511}
2512
2513
/*
 * Reconcile the freshly read LCT (pHba->lct) with the driver's device
 * tables: add devices that appeared, bring back devices that went
 * offline, and mark devices missing from the LCT as DPTI_DEV_OFFLINE.
 * Returns 0 on success, -1 on a missing LCT, -ENOMEM on allocation
 * failure.  Caller holds the host lock (see adpt_rescan).
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in 32-bit words: header of 3 words, 9 per entry.
	   NOTE(review): entry size assumed from this arithmetic - confirm
	   against i2o_lct_entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			/* buf[0..2] carry channel/id/lun packed by the
			   firmware's 0x8000 scalar group. */
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					/* First LUN on this id. */
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Append to the per-id LUN chain. */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
2679
/*
 * Complete every outstanding (already-issued) command on every device
 * of this host with QUEUE_FULL so the mid-layer will requeue them.
 * Used after a controller reset; commands with serial_number == 0 have
 * not been posted and are skipped.
 */
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		/* Walk the per-device command list under its list lock. */
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if(cmd->serial_number == 0){
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}
2698
2699
2700/*============================================================================
2701 * Routines from i2o subsystem
2702 *============================================================================
2703 */
2704
2705
2706
2707/*
2708 * Bring an I2O controller into HOLD state. See the spec.
2709 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		/* Re-activation: query current IOP state, resetting first
		   if the controller does not answer. */
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any already-active (or failed) state must be reset back
		   to ADAPTER_STATE_RESET before re-initialization. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		/* First-time activation: a reset is always required. */
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
2761
2762/*
2763 * Bring a controller online into OPERATIONAL state.
2764 */
2765
2766static int adpt_i2o_online_hba(adpt_hba* pHba)
2767{
2768 if (adpt_i2o_systab_send(pHba) < 0) {
2769 adpt_i2o_delete_hba(pHba);
2770 return -1;
2771 }
2772 /* In READY state */
2773
2774 if (adpt_i2o_enable_hba(pHba) < 0) {
2775 adpt_i2o_delete_hba(pHba);
2776 return -1;
2777 }
2778
2779 /* In OPERATIONAL state */
2780 return 0;
2781}
2782
/*
 * Post an I2O_CMD_UTIL_NOP using message frame @m, acquiring a frame
 * from the inbound FIFO first if @m is EMPTY_QUEUE.  Used to hand a
 * preserved/stale message frame back to the controller.  Returns 0 on
 * success, 2 if no frame became available within 5 seconds.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	/* Ensure the frame is fully written before posting it. */
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}
2810
/*
 * Initialize the controller's outbound (reply) queue: send
 * I2O_CMD_OUTBOUND_INIT, wait for completion via a 4-byte DMA status
 * word, then allocate the coherent reply pool and prime the reply FIFO
 * with every frame's bus address.  Returns 0 on success or a negative
 * errno / -2 on protocol failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Acquire an inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte DMA word the controller writes its progress/status into. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Replace any previous reply pool before allocating a new one. */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Hand every reply frame's bus address to the controller. */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
2907
2908
2909/*
2910 * I2O System Table. Contains information about
2911 * all the IOPs in the system. Used to inform IOPs
2912 * about each other's existence.
2913 *
2914 * sys_tbl_ver is the CurrentChangeIndicator that is
2915 * used by IOPs to track changes.
2916 */
2917
2918
2919
/*
 * Fetch the IOP status block via I2O_CMD_STATUS_GET into the coherent
 * pHba->status_block buffer (allocated on first call), then derive the
 * inbound/outbound FIFO sizes and the scatter/gather table size from
 * it.  Completion is detected by polling the last status byte
 * (offset 87) for 0xff.  Returns 0 on success or a negative errno.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n", 
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	/* Acquire an inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* The controller writes the block by DMA and sets the final byte
	   to 0xff when done. */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		/* 64-bit SGEs carry an extra address word per element. */
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
		case 0x01:
			printk("INIT\n");
			break;
		case 0x02:
			printk("RESET\n");
			break;
		case 0x04:
			printk("HOLD\n");
			break;
		case 0x05:
			printk("READY\n");
			break;
		case 0x08:
			printk("OPERATIONAL\n");
			break;
		case 0x10:
			printk("FAILED\n");
			break;
		case 0x11:
			printk("FAULTED\n");
			break;
		default:
			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
3040
3041/*
3042 * Get the IOP's Logical Configuration Table
3043 */
/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	/* The IOP reports the real table size in the reply; if our buffer
	   was too small, grow it and ask again. */
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", 
				pHba->name, ret);	
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	/* Map the firmware debug buffer if the controller exposes one. */
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2]; 
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}
3113
/*
 * (Re)build the shared I2O system table describing every registered
 * HBA (hba_chain/hba_count).  Frees any previous table, allocates a
 * new coherent one, and fills one entry per controller whose status
 * block can be refreshed.  Returns 0 on success or -ENOMEM.
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");	
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one	
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		/* iop_id 0 and 1 are reserved, hence the +2. */
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* Inbound FIFO register lives at offset 0x40 of the
		   controller's register window. */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
3175
3176
3177/*
3178 * Dump the information block associated with a given unit (TID)
3179 */
3180
3181static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3182{
3183 char buf[64];
3184 int unit = d->lct_data.tid;
3185
3186 printk(KERN_INFO "TID %3.3d ", unit);
3187
3188 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3189 {
3190 buf[16]=0;
3191 printk(" Vendor: %-12.12s", buf);
3192 }
3193 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3194 {
3195 buf[16]=0;
3196 printk(" Device: %-12.12s", buf);
3197 }
3198 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3199 {
3200 buf[8]=0;
3201 printk(" Rev: %-12.12s\n", buf);
3202 }
3203#ifdef DEBUG
3204 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3205 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3206 printk(KERN_INFO "\tFlags: ");
3207
3208 if(d->lct_data.device_flags&(1<<0))
3209 printk("C"); // ConfigDialog requested
3210 if(d->lct_data.device_flags&(1<<1))
3211 printk("U"); // Multi-user capable
3212 if(!(d->lct_data.device_flags&(1<<4)))
3213 printk("P"); // Peer service enabled!
3214 if(!(d->lct_data.device_flags&(1<<5)))
3215 printk("M"); // Mgmt service enabled!
3216 printk("\n");
3217#endif
3218}
3219
3220#ifdef DEBUG
3221/*
3222 * Do i2o class name lookup
3223 */
3224static const char *adpt_i2o_get_class_name(int class)
3225{
3226 int idx = 16;
3227 static char *i2o_class_name[] = {
3228 "Executive",
3229 "Device Driver Module",
3230 "Block Device",
3231 "Tape Device",
3232 "LAN Interface",
3233 "WAN Interface",
3234 "Fibre Channel Port",
3235 "Fibre Channel Device",
3236 "SCSI Device",
3237 "ATE Port",
3238 "ATE Device",
3239 "Floppy Controller",
3240 "Floppy Device",
3241 "Secondary Bus Port",
3242 "Peer Transport Agent",
3243 "Peer Transport",
3244 "Unknown"
3245 };
3246
3247 switch(class&0xFFF) {
3248 case I2O_CLASS_EXECUTIVE:
3249 idx = 0; break;
3250 case I2O_CLASS_DDM:
3251 idx = 1; break;
3252 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3253 idx = 2; break;
3254 case I2O_CLASS_SEQUENTIAL_STORAGE:
3255 idx = 3; break;
3256 case I2O_CLASS_LAN:
3257 idx = 4; break;
3258 case I2O_CLASS_WAN:
3259 idx = 5; break;
3260 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3261 idx = 6; break;
3262 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3263 idx = 7; break;
3264 case I2O_CLASS_SCSI_PERIPHERAL:
3265 idx = 8; break;
3266 case I2O_CLASS_ATE_PORT:
3267 idx = 9; break;
3268 case I2O_CLASS_ATE_PERIPHERAL:
3269 idx = 10; break;
3270 case I2O_CLASS_FLOPPY_CONTROLLER:
3271 idx = 11; break;
3272 case I2O_CLASS_FLOPPY_DEVICE:
3273 idx = 12; break;
3274 case I2O_CLASS_BUS_ADAPTER_PORT:
3275 idx = 13; break;
3276 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3277 idx = 14; break;
3278 case I2O_CLASS_PEER_TRANSPORT:
3279 idx = 15; break;
3280 }
3281 return i2o_class_name[idx];
3282}
3283#endif
3284
3285
3286static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3287{
3288 u32 msg[6];
3289 int ret, size = sizeof(i2o_hrt);
3290
3291 do {
3292 if (pHba->hrt == NULL) {
67af2b06
MS
3293 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3294 size, &pHba->hrt_pa, GFP_KERNEL);
1da177e4
LT
3295 if (pHba->hrt == NULL) {
3296 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3297 return -ENOMEM;
3298 }
3299 }
3300
3301 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3302 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3303 msg[2]= 0;
3304 msg[3]= 0;
3305 msg[4]= (0xD0000000 | size); /* Simple transaction */
67af2b06 3306 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
1da177e4
LT
3307
3308 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3309 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3310 return ret;
3311 }
3312
3313 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
67af2b06
MS
3314 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3315 dma_free_coherent(&pHba->pDev->dev, size,
3316 pHba->hrt, pHba->hrt_pa);
3317 size = newsize;
1da177e4
LT
3318 pHba->hrt = NULL;
3319 }
3320 } while(pHba->hrt == NULL);
3321 return 0;
3322}
3323
3324/*
3325 * Query one scalar group value or a whole scalar group.
3326 */
3327static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3328 int group, int field, void *buf, int buflen)
3329{
3330 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
67af2b06
MS
3331 u8 *opblk_va;
3332 dma_addr_t opblk_pa;
3333 u8 *resblk_va;
3334 dma_addr_t resblk_pa;
1da177e4
LT
3335
3336 int size;
3337
3338 /* 8 bytes for header */
67af2b06
MS
3339 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3340 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3341 if (resblk_va == NULL) {
1da177e4
LT
3342 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3343 return -ENOMEM;
3344 }
3345
67af2b06
MS
3346 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3347 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3348 if (opblk_va == NULL) {
3349 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3350 resblk_va, resblk_pa);
3351 printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3352 pHba->name);
3353 return -ENOMEM;
3354 }
1da177e4
LT
3355 if (field == -1) /* whole group */
3356 opblk[4] = -1;
3357
67af2b06 3358 memcpy(opblk_va, opblk, sizeof(opblk));
1da177e4 3359 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
67af2b06
MS
3360 opblk_va, opblk_pa, sizeof(opblk),
3361 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3362 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
1da177e4 3363 if (size == -ETIME) {
67af2b06
MS
3364 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3365 resblk_va, resblk_pa);
1da177e4
LT
3366 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3367 return -ETIME;
3368 } else if (size == -EINTR) {
67af2b06
MS
3369 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3370 resblk_va, resblk_pa);
1da177e4
LT
3371 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3372 return -EINTR;
3373 }
3374
67af2b06 3375 memcpy(buf, resblk_va+8, buflen); /* cut off header */
1da177e4 3376
67af2b06
MS
3377 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3378 resblk_va, resblk_pa);
1da177e4
LT
3379 if (size < 0)
3380 return size;
3381
3382 return buflen;
3383}
3384
3385
3386/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3387 *
3388 * This function can be used for all UtilParamsGet/Set operations.
3389 * The OperationBlock is given in opblk-buffer,
3390 * and results are returned in resblk-buffer.
3391 * Note that the minimum sized resblk is 8 bytes and contains
3392 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3393 */
3394static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
67af2b06
MS
3395 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3396 void *resblk_va, dma_addr_t resblk_pa, int reslen)
1da177e4
LT
3397{
3398 u32 msg[9];
67af2b06 3399 u32 *res = (u32 *)resblk_va;
1da177e4
LT
3400 int wait_status;
3401
3402 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3403 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3404 msg[2] = 0;
3405 msg[3] = 0;
3406 msg[4] = 0;
3407 msg[5] = 0x54000000 | oplen; /* OperationBlock */
67af2b06 3408 msg[6] = (u32)opblk_pa;
1da177e4 3409 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
67af2b06 3410 msg[8] = (u32)resblk_pa;
1da177e4
LT
3411
3412 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
67af2b06 3413 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
1da177e4
LT
3414 return wait_status; /* -DetailedStatus */
3415 }
3416
3417 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3418 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3419 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3420 pHba->name,
3421 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3422 : "PARAMS_GET",
3423 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3424 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3425 }
3426
3427 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3428}
3429
3430
3431static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3432{
3433 u32 msg[4];
3434 int ret;
3435
3436 adpt_i2o_status_get(pHba);
3437
3438 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3439
3440 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3441 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3442 return 0;
3443 }
3444
3445 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3446 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3447 msg[2] = 0;
3448 msg[3] = 0;
3449
3450 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3451 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3452 pHba->unit, -ret);
3453 } else {
3454 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3455 }
3456
3457 adpt_i2o_status_get(pHba);
3458 return ret;
3459}
3460
3461
3462/*
3463 * Enable IOP. Allows the IOP to resume external operations.
3464 */
3465static int adpt_i2o_enable_hba(adpt_hba* pHba)
3466{
3467 u32 msg[4];
3468 int ret;
3469
3470 adpt_i2o_status_get(pHba);
3471 if(!pHba->status_block){
3472 return -ENOMEM;
3473 }
3474 /* Enable only allowed on READY state */
3475 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3476 return 0;
3477
3478 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3479 return -EINVAL;
3480
3481 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3482 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3483 msg[2]= 0;
3484 msg[3]= 0;
3485
3486 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3487 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3488 pHba->name, ret);
3489 } else {
3490 PDEBUG("%s: Enabled.\n", pHba->name);
3491 }
3492
3493 adpt_i2o_status_get(pHba);
3494 return ret;
3495}
3496
3497
3498static int adpt_i2o_systab_send(adpt_hba* pHba)
3499{
3500 u32 msg[12];
3501 int ret;
3502
3503 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3504 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3505 msg[2] = 0;
3506 msg[3] = 0;
3507 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3508 msg[5] = 0; /* Segment 0 */
3509
3510 /*
3511 * Provide three SGL-elements:
3512 * System table (SysTab), Private memory space declaration and
3513 * Private i/o space declaration
3514 */
3515 msg[6] = 0x54000000 | sys_tbl_len;
67af2b06 3516 msg[7] = (u32)sys_tbl_pa;
1da177e4
LT
3517 msg[8] = 0x54000000 | 0;
3518 msg[9] = 0;
3519 msg[10] = 0xD4000000 | 0;
3520 msg[11] = 0;
3521
3522 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3523 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3524 pHba->name, ret);
3525 }
3526#ifdef DEBUG
3527 else {
3528 PINFO("%s: SysTab set.\n", pHba->name);
3529 }
3530#endif
3531
3532 return ret;
3533 }
3534
3535
3536/*============================================================================
3537 *
3538 *============================================================================
3539 */
3540
3541
3542#ifdef UARTDELAY
3543
/*
 * Busy-wait for roughly @millisec milliseconds.
 * Fix: the original read "static static void" — a duplicate storage-class
 * specifier, which is invalid C; it only survived because UARTDELAY is
 * never defined, so this function was never compiled.
 */
static void adpt_delay(int millisec)
{
	int i;

	/* udelay() takes microseconds; spin one millisecond per iteration. */
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3551
3552#endif
3553
24601bbc 3554static struct scsi_host_template driver_template = {
c864cb14 3555 .module = THIS_MODULE,
1da177e4
LT
3556 .name = "dpt_i2o",
3557 .proc_name = "dpt_i2o",
ff98f7ce 3558 .show_info = adpt_show_info,
1da177e4
LT
3559 .info = adpt_info,
3560 .queuecommand = adpt_queue,
3561 .eh_abort_handler = adpt_abort,
3562 .eh_device_reset_handler = adpt_device_reset,
3563 .eh_bus_reset_handler = adpt_bus_reset,
3564 .eh_host_reset_handler = adpt_reset,
3565 .bios_param = adpt_bios_param,
3566 .slave_configure = adpt_slave_configure,
3567 .can_queue = MAX_TO_IOP_MESSAGES,
3568 .this_id = 7,
3569 .cmd_per_lun = 1,
3570 .use_clustering = ENABLE_CLUSTERING,
3571};
c864cb14
MS
3572
3573static int __init adpt_init(void)
3574{
3575 int error;
3576 adpt_hba *pHba, *next;
3577
3578 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3579
3580 error = adpt_detect(&driver_template);
3581 if (error < 0)
3582 return error;
3583 if (hba_chain == NULL)
3584 return -ENODEV;
3585
3586 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3587 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3588 if (error)
3589 goto fail;
3590 scsi_scan_host(pHba->host);
3591 }
3592 return 0;
3593fail:
3594 for (pHba = hba_chain; pHba; pHba = next) {
3595 next = pHba->next;
3596 scsi_remove_host(pHba->host);
3597 }
3598 return error;
3599}
3600
3601static void __exit adpt_exit(void)
3602{
3603 adpt_hba *pHba, *next;
3604
3605 for (pHba = hba_chain; pHba; pHba = pHba->next)
3606 scsi_remove_host(pHba->host);
3607 for (pHba = hba_chain; pHba; pHba = next) {
3608 next = pHba->next;
3609 adpt_release(pHba->host);
3610 }
3611}
3612
/* Register module entry/exit handlers with the kernel module loader. */
module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");
This page took 1.138635 seconds and 5 git commands to generate.