[PATCH] Add kernel<->userspace ABI stability documentation
drivers/block/cciss.c
1/*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23#include <linux/config.h> /* CONFIG_PROC_FS */
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/bio.h>
34#include <linux/blkpg.h>
35#include <linux/timer.h>
36#include <linux/proc_fs.h>
37#include <linux/init.h>
38#include <linux/hdreg.h>
39#include <linux/spinlock.h>
40#include <linux/compat.h>
41#include <linux/blktrace_api.h>
42#include <asm/uaccess.h>
43#include <asm/io.h>
44
45#include <linux/dma-mapping.h>
46#include <linux/blkdev.h>
47#include <linux/genhd.h>
48#include <linux/completion.h>
49
50#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51#define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
52#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
53
54/* Embedded module documentation macros - see modules.h */
55MODULE_AUTHOR("Hewlett-Packard Company");
56MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
57MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i");
59MODULE_LICENSE("GPL");
60
61#include "cciss_cmd.h"
62#include "cciss.h"
63#include <linux/cciss_ioctl.h>
64
65/* define the PCI info for the cards we can control */
66static const struct pci_device_id cciss_pci_device_id[] = {
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
68 0x0E11, 0x4070, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4080, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
72 0x0E11, 0x4082, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
74 0x0E11, 0x4083, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409A, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409B, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x409C, 0, 0, 0},
81 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
82 0x0E11, 0x409D, 0, 0, 0},
83 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
84 0x0E11, 0x4091, 0, 0, 0},
85 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
86 0x103C, 0x3225, 0, 0, 0},
87 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
88 0x103c, 0x3223, 0, 0, 0},
89 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
90 0x103c, 0x3234, 0, 0, 0},
91 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
92 0x103c, 0x3235, 0, 0, 0},
93 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
94 0x103c, 0x3211, 0, 0, 0},
95 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
96 0x103c, 0x3212, 0, 0, 0},
97 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
98 0x103c, 0x3213, 0, 0, 0},
99 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
100 0x103c, 0x3214, 0, 0, 0},
101 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
102 0x103c, 0x3215, 0, 0, 0},
103 {0,}
104};
105MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
106
107#define NR_PRODUCTS ARRAY_SIZE(products)
108
109/* board_id = Subsystem Device ID & Vendor ID
110 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers
112 */
113static struct board_type products[] = {
114 { 0x40700E11, "Smart Array 5300", &SA5_access },
115 { 0x40800E11, "Smart Array 5i", &SA5B_access},
116 { 0x40820E11, "Smart Array 532", &SA5B_access},
117 { 0x40830E11, "Smart Array 5312", &SA5B_access},
118 { 0x409A0E11, "Smart Array 641", &SA5_access},
119 { 0x409B0E11, "Smart Array 642", &SA5_access},
120 { 0x409C0E11, "Smart Array 6400", &SA5_access},
121 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 { 0x40910E11, "Smart Array 6i", &SA5_access},
123 { 0x3225103C, "Smart Array P600", &SA5_access},
124 { 0x3223103C, "Smart Array P800", &SA5_access},
125 { 0x3234103C, "Smart Array P400", &SA5_access},
126 { 0x3235103C, "Smart Array P400i", &SA5_access},
127 { 0x3211103C, "Smart Array E200i", &SA5_access},
128 { 0x3212103C, "Smart Array E200", &SA5_access},
129 { 0x3213103C, "Smart Array E200i", &SA5_access},
130 { 0x3214103C, "Smart Array E200i", &SA5_access},
131 { 0x3215103C, "Smart Array E200i", &SA5_access},
132};
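/*
 * Editorial note (not part of the original source): board_id packs the PCI
 * subsystem IDs as (subsystem device ID << 16) | subsystem vendor ID. A
 * worked example from the tables above:
 *
 *	Smart Array P600:  (0x3225 << 16) | 0x103C == 0x3225103C
 *
 * so the { PCI_VENDOR_ID_HP, ..., 0x103C, 0x3225 } entry in
 * cciss_pci_device_id[] and the 0x3225103C entry here describe the same board.
 */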
133
134/* How long to wait (in milliseconds) for board to go into simple mode */
135#define MAX_CONFIG_WAIT 30000
136#define MAX_IOCTL_CONFIG_WAIT 1000
137
138/* define how many times we will try a command because of bus resets */
139#define MAX_CMD_RETRIES 3
140
141#define READ_AHEAD 1024
142#define NR_CMDS 384 /* #commands that can be outstanding */
143#define MAX_CTLR 32
144
145/* Originally cciss driver only supports 8 major numbers */
146#define MAX_CTLR_ORIG 8
147
148
149static ctlr_info_t *hba[MAX_CTLR];
150
151static void do_cciss_request(request_queue_t *q);
152static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
153static int cciss_open(struct inode *inode, struct file *filep);
154static int cciss_release(struct inode *inode, struct file *filep);
155static int cciss_ioctl(struct inode *inode, struct file *filep,
156 unsigned int cmd, unsigned long arg);
157static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
158
159static int revalidate_allvol(ctlr_info_t *host);
160static int cciss_revalidate(struct gendisk *disk);
161static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
162static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
163
164static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
165 int withirq, unsigned int *total_size, unsigned int *block_size);
166static void cciss_geometry_inquiry(int ctlr, int logvol,
167 int withirq, unsigned int total_size,
168 unsigned int block_size, InquiryData_struct *inq_buff,
169 drive_info_struct *drv);
170static void cciss_getgeometry(int cntl_num);
171static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
172static void start_io( ctlr_info_t *h);
173static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
175 unsigned char *scsi3addr, int cmd_type);
176static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
177 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
178 int cmd_type);
179
180static void fail_all_cmds(unsigned long ctlr);
181
182#ifdef CONFIG_PROC_FS
183static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
184 int length, int *eof, void *data);
185static void cciss_procinit(int i);
186#else
187static void cciss_procinit(int i) {}
188#endif /* CONFIG_PROC_FS */
189
190#ifdef CONFIG_COMPAT
191static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
192#endif
193
194static struct block_device_operations cciss_fops = {
195 .owner = THIS_MODULE,
196 .open = cciss_open,
197 .release = cciss_release,
198 .ioctl = cciss_ioctl,
199 .getgeo = cciss_getgeo,
200#ifdef CONFIG_COMPAT
201 .compat_ioctl = cciss_compat_ioctl,
202#endif
203 .revalidate_disk= cciss_revalidate,
204};
205
206/*
207 * Enqueuing and dequeuing functions for cmdlists.
208 */
209static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
210{
211 if (*Qptr == NULL) {
212 *Qptr = c;
213 c->next = c->prev = c;
214 } else {
215 c->prev = (*Qptr)->prev;
216 c->next = (*Qptr);
217 (*Qptr)->prev->next = c;
218 (*Qptr)->prev = c;
219 }
220}
221
222static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
223 CommandList_struct *c)
224{
225 if (c && c->next != c) {
226 if (*Qptr == c) *Qptr = c->next;
227 c->prev->next = c->next;
228 c->next->prev = c->prev;
229 } else {
230 *Qptr = NULL;
231 }
232 return c;
233}
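/*
 * Usage sketch (editorial; it mirrors the submission pattern used in the
 * ioctl and sendcmd paths further down). Commands are queued and kicked off
 * while holding the per-controller lock:
 *
 *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 *	addQ(&h->reqQ, c);	// append c to the circular request queue
 *	h->Qdepth++;
 *	start_io(h);		// start_io() removeQ()s commands for the board
 *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
 */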
234
235#include "cciss_scsi.c" /* For SCSI tape support */
236
237#ifdef CONFIG_PROC_FS
238
239/*
240 * Report information about this controller.
241 */
242#define ENG_GIG 1000000000
243#define ENG_GIG_FACTOR (ENG_GIG/512)
244#define RAID_UNKNOWN 6
245static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
246 "UNKNOWN"};
247
248static struct proc_dir_entry *proc_cciss;
249
250static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
251 int length, int *eof, void *data)
252{
253 off_t pos = 0;
254 off_t len = 0;
255 int size, i, ctlr;
256 ctlr_info_t *h = (ctlr_info_t*)data;
257 drive_info_struct *drv;
258 unsigned long flags;
259 sector_t vol_sz, vol_sz_frac;
260
261 ctlr = h->ctlr;
262
263 /* prevent displaying bogus info during configuration
264 * or deconfiguration of a logical volume
265 */
266 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
267 if (h->busy_configuring) {
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269 return -EBUSY;
270 }
271 h->busy_configuring = 1;
272 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273
274 size = sprintf(buffer, "%s: HP %s Controller\n"
275 "Board ID: 0x%08lx\n"
276 "Firmware Version: %c%c%c%c\n"
277 "IRQ: %d\n"
278 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
288 (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->Qdepth, h->commands_outstanding,
291 h->maxQsinceinit, h->max_outstanding, h->maxSG);
292
293 pos += size; len += size;
294 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
295 for(i=0; i<=h->highest_lun; i++) {
296
297 drv = &h->drv[i];
298 if (drv->heads == 0)
299 continue;
300
301 vol_sz = drv->nr_blocks;
302 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
303 vol_sz_frac *= 100;
304 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
305
306 if (drv->raid_level > 5)
307 drv->raid_level = RAID_UNKNOWN;
308 size = sprintf(buffer+len, "cciss/c%dd%d:"
309 "\t%4u.%02uGB\tRAID %s\n",
310 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
311 raid_label[drv->raid_level]);
312 pos += size; len += size;
313 }
314
315 *eof = 1;
316 *start = buffer+offset;
317 len -= offset;
318 if (len>length)
319 len = length;
320 h->busy_configuring = 0;
321 return len;
322}
323
324static int
325cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data)
327{
328 unsigned char cmd[80];
329 int len;
330#ifdef CONFIG_CISS_SCSI_TAPE
331 ctlr_info_t *h = (ctlr_info_t *) data;
332 int rc;
333#endif
334
335 if (count > sizeof(cmd)-1) return -EINVAL;
336 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
337 cmd[count] = '\0';
338 len = strlen(cmd); // above 3 lines ensure safety
339 if (len && cmd[len-1] == '\n')
340 cmd[--len] = '\0';
341# ifdef CONFIG_CISS_SCSI_TAPE
342 if (strcmp("engage scsi", cmd)==0) {
343 rc = cciss_engage_scsi(h->ctlr);
344 if (rc != 0) return -rc;
345 return count;
346 }
347 /* might be nice to have "disengage" too, but it's not
348 safely possible. (only 1 module use count, lock issues.) */
349# endif
350 return -EINVAL;
351}
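/*
 * Usage note (editorial): with CONFIG_CISS_SCSI_TAPE enabled, SCSI tape
 * support is engaged from userspace by writing to the proc entry created
 * below, e.g. (path assumes the default devname "cciss0"):
 *
 *	echo "engage scsi" > /proc/driver/cciss/cciss0
 *
 * There is deliberately no "disengage" command; see the comment above.
 */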
352
353/*
354 * Get us a file in /proc/cciss that says something about each controller.
355 * Create /proc/cciss if it doesn't exist yet.
356 */
357static void __devinit cciss_procinit(int i)
358{
359 struct proc_dir_entry *pde;
360
361 if (proc_cciss == NULL) {
362 proc_cciss = proc_mkdir("cciss", proc_root_driver);
363 if (!proc_cciss)
364 return;
365 }
366
367 pde = create_proc_read_entry(hba[i]->devname,
368 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
369 proc_cciss, cciss_proc_get_info, hba[i]);
370 pde->write_proc = cciss_proc_write;
371}
372#endif /* CONFIG_PROC_FS */
373
374/*
375 * For operations that cannot sleep, a command block is allocated at init,
376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
377 * which ones are free or in use. For operations that can wait for kmalloc
378 * to possibly sleep, this routine can be called with get_from_pool set to 0.
379 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
380 */
381static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
382{
383 CommandList_struct *c;
384 int i;
385 u64bit temp64;
386 dma_addr_t cmd_dma_handle, err_dma_handle;
387
388 if (!get_from_pool)
389 {
390 c = (CommandList_struct *) pci_alloc_consistent(
391 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
392 if(c==NULL)
393 return NULL;
394 memset(c, 0, sizeof(CommandList_struct));
395
396 c->cmdindex = -1;
397
398 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
399 h->pdev, sizeof(ErrorInfo_struct),
400 &err_dma_handle);
401
402 if (c->err_info == NULL)
403 {
404 pci_free_consistent(h->pdev,
405 sizeof(CommandList_struct), c, cmd_dma_handle);
406 return NULL;
407 }
408 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
409 } else /* get it out of the controllers pool */
410 {
411 do {
412 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
413 if (i == NR_CMDS)
414 return NULL;
415 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
416#ifdef CCISS_DEBUG
417 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
418#endif
419 c = h->cmd_pool + i;
420 memset(c, 0, sizeof(CommandList_struct));
421 cmd_dma_handle = h->cmd_pool_dhandle
422 + i*sizeof(CommandList_struct);
423 c->err_info = h->errinfo_pool + i;
424 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
425 err_dma_handle = h->errinfo_pool_dhandle
426 + i*sizeof(ErrorInfo_struct);
427 h->nr_allocs++;
428
429 c->cmdindex = i;
430 }
431
432 c->busaddr = (__u32) cmd_dma_handle;
433 temp64.val = (__u64) err_dma_handle;
434 c->ErrDesc.Addr.lower = temp64.val32.lower;
435 c->ErrDesc.Addr.upper = temp64.val32.upper;
436 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
437
438 c->ctlr = h->ctlr;
439 return c;
440
441
442}
443
444/*
445 * Frees a command block that was previously allocated with cmd_alloc().
446 */
447static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
448{
449 int i;
450 u64bit temp64;
451
452 if( !got_from_pool)
453 {
454 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr);
460 } else
461 {
462 i = c - h->cmd_pool;
463 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
464 h->nr_frees++;
465 }
466}
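/*
 * Pairing sketch (editorial): the got_from_pool argument of cmd_free() must
 * match the get_from_pool argument that was given to cmd_alloc():
 *
 *	c = cmd_alloc(h, 1);	// bitmap-managed pool, usable in atomic context
 *	...
 *	cmd_free(h, c, 1);	// returns the slot to the pool
 *
 * whereas cmd_alloc(h, 0)/cmd_free(h, c, 0) allocate and release a separate
 * DMA-consistent buffer via pci_alloc_consistent()/pci_free_consistent().
 */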
467
468static inline ctlr_info_t *get_host(struct gendisk *disk)
469{
470 return disk->queue->queuedata;
471}
472
473static inline drive_info_struct *get_drv(struct gendisk *disk)
474{
475 return disk->private_data;
476}
477
478/*
479 * Open. Make sure the device is really there.
480 */
481static int cciss_open(struct inode *inode, struct file *filep)
482{
483 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
484 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
485
486#ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488#endif /* CCISS_DEBUG */
489
490 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY;
492 /*
493 * Root is allowed to open raw volume zero even if it's not configured
494 * so array config can still work. Root is also allowed to open any
495 * volume that has a LUN ID, so it can issue IOCTL to reread the
496 * disk information. I don't think I really like this
497 * but I'm already using way too many device nodes to claim another one
498 * for "raw controller".
499 */
500 if (drv->nr_blocks == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) {
504 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) {
507 return -ENXIO;
508 }
509 }
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
512 }
513 drv->usage_count++;
514 host->usage_count++;
515 return 0;
516}
517/*
518 * Close. Sync first.
519 */
520static int cciss_release(struct inode *inode, struct file *filep)
521{
522 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
523 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
524
525#ifdef CCISS_DEBUG
526 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
527#endif /* CCISS_DEBUG */
528
529 drv->usage_count--;
530 host->usage_count--;
531 return 0;
532}
533
534#ifdef CONFIG_COMPAT
535
536static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
537{
538 int ret;
539 lock_kernel();
540 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
541 unlock_kernel();
542 return ret;
543}
544
545static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
546static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
547
548static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
549{
550 switch (cmd) {
551 case CCISS_GETPCIINFO:
552 case CCISS_GETINTINFO:
553 case CCISS_SETINTINFO:
554 case CCISS_GETNODENAME:
555 case CCISS_SETNODENAME:
556 case CCISS_GETHEARTBEAT:
557 case CCISS_GETBUSTYPES:
558 case CCISS_GETFIRMVER:
559 case CCISS_GETDRIVVER:
560 case CCISS_REVALIDVOLS:
561 case CCISS_DEREGDISK:
562 case CCISS_REGNEWDISK:
563 case CCISS_REGNEWD:
564 case CCISS_RESCANDISK:
565 case CCISS_GETLUNINFO:
566 return do_ioctl(f, cmd, arg);
567
568 case CCISS_PASSTHRU32:
569 return cciss_ioctl32_passthru(f, cmd, arg);
570 case CCISS_BIG_PASSTHRU32:
571 return cciss_ioctl32_big_passthru(f, cmd, arg);
572
573 default:
574 return -ENOIOCTLCMD;
575 }
576}
577
578static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
579{
580 IOCTL32_Command_struct __user *arg32 =
581 (IOCTL32_Command_struct __user *) arg;
582 IOCTL_Command_struct arg64;
583 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 int err;
585 u32 cp;
586
587 err = 0;
588 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
589 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
590 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
591 err |= get_user(arg64.buf_size, &arg32->buf_size);
592 err |= get_user(cp, &arg32->buf);
593 arg64.buf = compat_ptr(cp);
594 err |= copy_to_user(p, &arg64, sizeof(arg64));
595
596 if (err)
597 return -EFAULT;
598
599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
600 if (err)
601 return err;
602 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
603 if (err)
604 return -EFAULT;
605 return err;
606}
607
608static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
609{
610 BIG_IOCTL32_Command_struct __user *arg32 =
611 (BIG_IOCTL32_Command_struct __user *) arg;
612 BIG_IOCTL_Command_struct arg64;
613 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
614 int err;
615 u32 cp;
616
617 err = 0;
618 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
619 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
620 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
621 err |= get_user(arg64.buf_size, &arg32->buf_size);
622 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
623 err |= get_user(cp, &arg32->buf);
624 arg64.buf = compat_ptr(cp);
625 err |= copy_to_user(p, &arg64, sizeof(arg64));
626
627 if (err)
628 return -EFAULT;
629
630 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
631 if (err)
632 return err;
633 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
634 if (err)
635 return -EFAULT;
636 return err;
637}
638#endif
639
640static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
641{
642 drive_info_struct *drv = get_drv(bdev->bd_disk);
643
644 if (!drv->cylinders)
645 return -ENXIO;
646
647 geo->heads = drv->heads;
648 geo->sectors = drv->sectors;
649 geo->cylinders = drv->cylinders;
650 return 0;
651}
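/*
 * Userspace sketch (editorial; the device path is only an example): the
 * geometry filled in above is what an HDIO_GETGEO ioctl on a cciss block
 * node reports:
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/cciss/c0d0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *		printf("heads=%u sectors=%u cylinders=%u\n",
 *		       geo.heads, geo.sectors, geo.cylinders);
 */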
652
653/*
654 * ioctl
655 */
656static int cciss_ioctl(struct inode *inode, struct file *filep,
657 unsigned int cmd, unsigned long arg)
658{
659 struct block_device *bdev = inode->i_bdev;
660 struct gendisk *disk = bdev->bd_disk;
661 ctlr_info_t *host = get_host(disk);
662 drive_info_struct *drv = get_drv(disk);
663 int ctlr = host->ctlr;
664 void __user *argp = (void __user *)arg;
665
666#ifdef CCISS_DEBUG
667 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
668#endif /* CCISS_DEBUG */
669
670 switch(cmd) {
671 case CCISS_GETPCIINFO:
672 {
673 cciss_pci_info_struct pciinfo;
674
675 if (!arg) return -EINVAL;
676 pciinfo.domain = pci_domain_nr(host->pdev->bus);
677 pciinfo.bus = host->pdev->bus->number;
678 pciinfo.dev_fn = host->pdev->devfn;
679 pciinfo.board_id = host->board_id;
680 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
681 return -EFAULT;
682 return(0);
683 }
684 case CCISS_GETINTINFO:
685 {
686 cciss_coalint_struct intinfo;
687 if (!arg) return -EINVAL;
688 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
689 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
690 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
691 return -EFAULT;
692 return(0);
693 }
694 case CCISS_SETINTINFO:
695 {
696 cciss_coalint_struct intinfo;
697 unsigned long flags;
698 int i;
699
700 if (!arg) return -EINVAL;
701 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
702 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
703 return -EFAULT;
704 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
705
706 {
707// printk("cciss_ioctl: delay and count cannot be 0\n");
708 return( -EINVAL);
709 }
710 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
711 /* Update the field, and then ring the doorbell */
712 writel( intinfo.delay,
713 &(host->cfgtable->HostWrite.CoalIntDelay));
714 writel( intinfo.count,
715 &(host->cfgtable->HostWrite.CoalIntCount));
716 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
717
718 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
719 if (!(readl(host->vaddr + SA5_DOORBELL)
720 & CFGTBL_ChangeReq))
721 break;
722 /* delay and try again */
723 udelay(1000);
724 }
725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
726 if (i >= MAX_IOCTL_CONFIG_WAIT)
727 return -EAGAIN;
728 return(0);
729 }
730 case CCISS_GETNODENAME:
731 {
732 NodeName_type NodeName;
733 int i;
734
735 if (!arg) return -EINVAL;
736 for(i=0;i<16;i++)
737 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
738 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
739 return -EFAULT;
740 return(0);
741 }
742 case CCISS_SETNODENAME:
743 {
744 NodeName_type NodeName;
745 unsigned long flags;
746 int i;
747
748 if (!arg) return -EINVAL;
749 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
750
751 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
752 return -EFAULT;
753
754 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
755
756 /* Update the field, and then ring the doorbell */
757 for(i=0;i<16;i++)
758 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
759
760 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
761
762 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
763 if (!(readl(host->vaddr + SA5_DOORBELL)
764 & CFGTBL_ChangeReq))
765 break;
766 /* delay and try again */
767 udelay(1000);
768 }
769 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
770 if (i >= MAX_IOCTL_CONFIG_WAIT)
771 return -EAGAIN;
772 return(0);
773 }
774
775 case CCISS_GETHEARTBEAT:
776 {
777 Heartbeat_type heartbeat;
778
779 if (!arg) return -EINVAL;
780 heartbeat = readl(&host->cfgtable->HeartBeat);
781 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
782 return -EFAULT;
783 return(0);
784 }
785 case CCISS_GETBUSTYPES:
786 {
787 BusTypes_type BusTypes;
788
789 if (!arg) return -EINVAL;
790 BusTypes = readl(&host->cfgtable->BusTypes);
791 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
792 return -EFAULT;
793 return(0);
794 }
795 case CCISS_GETFIRMVER:
796 {
797 FirmwareVer_type firmware;
798
799 if (!arg) return -EINVAL;
800 memcpy(firmware, host->firm_ver, 4);
801
802 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
803 return -EFAULT;
804 return(0);
805 }
806 case CCISS_GETDRIVVER:
807 {
808 DriverVer_type DriverVer = DRIVER_VERSION;
809
810 if (!arg) return -EINVAL;
811
812 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
813 return -EFAULT;
814 return(0);
815 }
816
817 case CCISS_REVALIDVOLS:
818 if (bdev != bdev->bd_contains || drv != host->drv)
819 return -ENXIO;
820 return revalidate_allvol(host);
821
822 case CCISS_GETLUNINFO: {
823 LogvolInfo_struct luninfo;
824
825 luninfo.LunID = drv->LunID;
826 luninfo.num_opens = drv->usage_count;
827 luninfo.num_parts = 0;
828 if (copy_to_user(argp, &luninfo,
829 sizeof(LogvolInfo_struct)))
830 return -EFAULT;
831 return(0);
832 }
833 case CCISS_DEREGDISK:
834 return rebuild_lun_table(host, disk);
835
836 case CCISS_REGNEWD:
837 return rebuild_lun_table(host, NULL);
838
839 case CCISS_PASSTHRU:
840 {
841 IOCTL_Command_struct iocommand;
842 CommandList_struct *c;
843 char *buff = NULL;
844 u64bit temp64;
845 unsigned long flags;
846 DECLARE_COMPLETION(wait);
847
848 if (!arg) return -EINVAL;
849
850 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
851
852 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
853 return -EFAULT;
854 if((iocommand.buf_size < 1) &&
855 (iocommand.Request.Type.Direction != XFER_NONE))
856 {
857 return -EINVAL;
858 }
859#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
860 /* Check kmalloc limits */
861 if(iocommand.buf_size > 128000)
862 return -EINVAL;
863#endif
864 if(iocommand.buf_size > 0)
865 {
866 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
867 if( buff == NULL)
868 return -EFAULT;
869 }
870 if (iocommand.Request.Type.Direction == XFER_WRITE)
871 {
872 /* Copy the data into the buffer we created */
873 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
874 {
875 kfree(buff);
876 return -EFAULT;
877 }
878 } else {
879 memset(buff, 0, iocommand.buf_size);
880 }
881 if ((c = cmd_alloc(host , 0)) == NULL)
882 {
883 kfree(buff);
884 return -ENOMEM;
885 }
886 // Fill in the command type
887 c->cmd_type = CMD_IOCTL_PEND;
888 // Fill in Command Header
889 c->Header.ReplyQueue = 0; // unused in simple mode
890 if( iocommand.buf_size > 0) // buffer to fill
891 {
892 c->Header.SGList = 1;
893 c->Header.SGTotal= 1;
894 } else // no buffers to fill
895 {
896 c->Header.SGList = 0;
897 c->Header.SGTotal= 0;
898 }
899 c->Header.LUN = iocommand.LUN_info;
900 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
901
902 // Fill in Request block
903 c->Request = iocommand.Request;
904
905 // Fill in the scatter gather information
906 if (iocommand.buf_size > 0 )
907 {
908 temp64.val = pci_map_single( host->pdev, buff,
909 iocommand.buf_size,
910 PCI_DMA_BIDIRECTIONAL);
911 c->SG[0].Addr.lower = temp64.val32.lower;
912 c->SG[0].Addr.upper = temp64.val32.upper;
913 c->SG[0].Len = iocommand.buf_size;
914 c->SG[0].Ext = 0; // we are not chaining
915 }
916 c->waiting = &wait;
917
918 /* Put the request on the tail of the request queue */
919 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
920 addQ(&host->reqQ, c);
921 host->Qdepth++;
922 start_io(host);
923 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
924
925 wait_for_completion(&wait);
926
927 /* unlock the buffers from DMA */
928 temp64.val32.lower = c->SG[0].Addr.lower;
929 temp64.val32.upper = c->SG[0].Addr.upper;
930 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
931 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
932
933 /* Copy the error information out */
934 iocommand.error_info = *(c->err_info);
935 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
936 {
937 kfree(buff);
938 cmd_free(host, c, 0);
939 return( -EFAULT);
940 }
941
942 if (iocommand.Request.Type.Direction == XFER_READ)
943 {
944 /* Copy the data out of the buffer we created */
945 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
946 {
947 kfree(buff);
948 cmd_free(host, c, 0);
949 return -EFAULT;
950 }
951 }
952 kfree(buff);
953 cmd_free(host, c, 0);
954 return(0);
955 }
956 case CCISS_BIG_PASSTHRU: {
957 BIG_IOCTL_Command_struct *ioc;
958 CommandList_struct *c;
959 unsigned char **buff = NULL;
960 int *buff_size = NULL;
961 u64bit temp64;
962 unsigned long flags;
963 BYTE sg_used = 0;
964 int status = 0;
965 int i;
966 DECLARE_COMPLETION(wait);
967 __u32 left;
968 __u32 sz;
969 BYTE __user *data_ptr;
970
971 if (!arg)
972 return -EINVAL;
973 if (!capable(CAP_SYS_RAWIO))
974 return -EPERM;
975 ioc = (BIG_IOCTL_Command_struct *)
976 kmalloc(sizeof(*ioc), GFP_KERNEL);
977 if (!ioc) {
978 status = -ENOMEM;
979 goto cleanup1;
980 }
981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
982 status = -EFAULT;
983 goto cleanup1;
984 }
985 if ((ioc->buf_size < 1) &&
986 (ioc->Request.Type.Direction != XFER_NONE)) {
987 status = -EINVAL;
988 goto cleanup1;
989 }
990 /* Check kmalloc limits using all SGs */
991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
992 status = -EINVAL;
993 goto cleanup1;
994 }
995 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
996 status = -EINVAL;
997 goto cleanup1;
998 }
999 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
1003 }
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1009 }
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM;
1018 goto cleanup1;
1019 }
1020 if (ioc->Request.Type.Direction == XFER_WRITE) {
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1022 status = -EFAULT;
1023 goto cleanup1;
1024 }
1025 } else {
1026 memset(buff[sg_used], 0, sz);
1027 }
1028 left -= sz;
1029 data_ptr += sz;
1030 sg_used++;
1031 }
1032 if ((c = cmd_alloc(host , 0)) == NULL) {
1033 status = -ENOMEM;
1034 goto cleanup1;
1035 }
1036 c->cmd_type = CMD_IOCTL_PEND;
1037 c->Header.ReplyQueue = 0;
1038
1039 if( ioc->buf_size > 0) {
1040 c->Header.SGList = sg_used;
1041 c->Header.SGTotal= sg_used;
1042 } else {
1043 c->Header.SGList = 0;
1044 c->Header.SGTotal= 0;
1045 }
1046 c->Header.LUN = ioc->LUN_info;
1047 c->Header.Tag.lower = c->busaddr;
1048
1049 c->Request = ioc->Request;
1050 if (ioc->buf_size > 0 ) {
1051 int i;
1052 for(i=0; i<sg_used; i++) {
1053 temp64.val = pci_map_single( host->pdev, buff[i],
1054 buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1060 }
1061 }
1062 c->waiting = &wait;
1063 /* Put the request on the tail of the request queue */
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1065 addQ(&host->reqQ, c);
1066 host->Qdepth++;
1067 start_io(host);
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1069 wait_for_completion(&wait);
1070 /* unlock the buffers from DMA */
1071 for(i=0; i<sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower;
1073 temp64.val32.upper = c->SG[i].Addr.upper;
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1076 }
1077 /* Copy the error information out */
1078 ioc->error_info = *(c->err_info);
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1083 }
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1092 }
1093 ptr += buff_size[i];
1094 }
1095 }
1096 cmd_free(host, c, 0);
1097 status = 0;
1098cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1103 }
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1107 }
1108 default:
1109 return -ENOTTY;
1110 }
1111
1112}
1113
1114/*
1115 * revalidate_allvol is for online array config utilities. After a
1116 * utility reconfigures the drives in the array, it can use this function
1117 * (through an ioctl) to make the driver zap any previous disk structs for
1118 * that controller and get new ones.
1119 *
1120 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one
1122 * particular logical volume (instead of all of them on a particular
1123 * controller).
1124 */
1125static int revalidate_allvol(ctlr_info_t *host)
1126{
1127 int ctlr = host->ctlr, i;
1128 unsigned long flags;
1129
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY;
1136 }
1137 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1139
1140 for(i=0; i< NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i];
1142 if (disk) {
1143 request_queue_t *q = disk->queue;
1144
1145 if (disk->flags & GENHD_FL_UP)
1146 del_gendisk(disk);
1147 if (q)
1148 blk_cleanup_queue(q);
1149 }
1150 }
1151
1152 /*
1153 * Set the partition and block size structures for all volumes
1154 * on this controller to zero. We will reread all of this data
1155 */
1156 memset(host->drv, 0, sizeof(drive_info_struct)
1157 * CISS_MAX_LUN);
1158 /*
1159 * Tell the array controller not to give us any interrupts while
1160 * we check the new geometry. Then turn interrupts back on when
1161 * we're done.
1162 */
1163 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1164 cciss_getgeometry(ctlr);
1165 host->access.set_intr_mask(host, CCISS_INTR_ON);
1166
1167 /* Loop through each real device */
1168 for (i = 0; i < NWD; i++) {
1169 struct gendisk *disk = host->gendisk[i];
1170 drive_info_struct *drv = &(host->drv[i]);
1171 /* we must register the controller even if no disks exist */
1172 /* this is for the online array utilities */
1173 if (!drv->heads && i)
1174 continue;
1175 blk_queue_hardsect_size(drv->queue, drv->block_size);
1176 set_capacity(disk, drv->nr_blocks);
1177 add_disk(disk);
1178 }
1179 host->usage_count--;
1180 return 0;
1181}
1182
1183static inline void complete_buffers(struct bio *bio, int status)
1184{
1185 while (bio) {
1186 struct bio *xbh = bio->bi_next;
1187 int nr_sectors = bio_sectors(bio);
1188
1189 bio->bi_next = NULL;
1190 blk_finished_io(nr_sectors);
1191 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1192 bio = xbh;
1193 }
1194
1195}
1196
1197static void cciss_softirq_done(struct request *rq)
1198{
1199 CommandList_struct *cmd = rq->completion_data;
1200 ctlr_info_t *h = hba[cmd->ctlr];
1201 unsigned long flags;
1202 u64bit temp64;
1203 int i, ddir;
1204
1205 if (cmd->Request.Type.Direction == XFER_READ)
1206 ddir = PCI_DMA_FROMDEVICE;
1207 else
1208 ddir = PCI_DMA_TODEVICE;
1209
1210 /* command did not need to be retried */
1211 /* unmap the DMA mapping for all the scatter gather elements */
1212 for(i=0; i<cmd->Header.SGList; i++) {
1213 temp64.val32.lower = cmd->SG[i].Addr.lower;
1214 temp64.val32.upper = cmd->SG[i].Addr.upper;
1215 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1216 }
1217
1218 complete_buffers(rq->bio, rq->errors);
1219
1220#ifdef CCISS_DEBUG
1221 printk("Done with %p\n", rq);
1222#endif /* CCISS_DEBUG */
1223
1224 spin_lock_irqsave(&h->lock, flags);
1225 end_that_request_last(rq, rq->errors);
1226 cmd_free(h, cmd,1);
1227 spin_unlock_irqrestore(&h->lock, flags);
1228}
1229
1230/* This function will check the usage_count of the drive to be updated/added.
1231 * If the usage_count is zero then the drive information will be updated and
1232 * the disk will be re-registered with the kernel. If not then it will be
1233 * left alone for the next reboot. The exception to this is disk 0 which
1234 * will always be left registered with the kernel since it is also the
1235 * controller node. Any changes to disk 0 will show up on the next
1236 * reboot.
1237*/
1238static void cciss_update_drive_info(int ctlr, int drv_index)
1239 {
1240 ctlr_info_t *h = hba[ctlr];
1241 struct gendisk *disk;
1242 ReadCapdata_struct *size_buff = NULL;
1243 InquiryData_struct *inq_buff = NULL;
1244 unsigned int block_size;
1245 unsigned int total_size;
1246 unsigned long flags = 0;
1247 int ret = 0;
1248
1249 /* if the disk already exists then deregister it before proceeding*/
1250 if (h->drv[drv_index].raid_level != -1){
1251 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1252 h->drv[drv_index].busy_configuring = 1;
1253 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1254 ret = deregister_disk(h->gendisk[drv_index],
1255 &h->drv[drv_index], 0);
1256 h->drv[drv_index].busy_configuring = 0;
1257 }
1258
1259 /* If the disk is in use return */
1260 if (ret)
1261 return;
1262
1263
1264 /* Get information about the disk and modify the driver structure */
1265 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1266 if (size_buff == NULL)
1267 goto mem_msg;
1268 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1269 if (inq_buff == NULL)
1270 goto mem_msg;
1271
1272 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1273 &total_size, &block_size);
1274 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1275 inq_buff, &h->drv[drv_index]);
1276
1277 ++h->num_luns;
1278 disk = h->gendisk[drv_index];
1279 set_capacity(disk, h->drv[drv_index].nr_blocks);
1280
1281
1282 /* if it's the controller it's already added */
1283 if (drv_index){
1284 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1285
1286 /* Set up queue information */
1287 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1288 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1289
1290 /* This is a hardware imposed limit. */
1291 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1292
1293 /* This is a limit in the driver and could be eliminated. */
1294 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1295
1296 blk_queue_max_sectors(disk->queue, 512);
1297
1298 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1299
1300 disk->queue->queuedata = hba[ctlr];
1301
1302 blk_queue_hardsect_size(disk->queue,
1303 hba[ctlr]->drv[drv_index].block_size);
1304
1305 h->drv[drv_index].queue = disk->queue;
1306 add_disk(disk);
1307 }
1308
1309freeret:
1310 kfree(size_buff);
1311 kfree(inq_buff);
1312 return;
1313mem_msg:
1314 printk(KERN_ERR "cciss: out of memory\n");
1315 goto freeret;
1316}
1317
1318/* This function will find the first index of the controllers drive array
1319 * that has a -1 for the raid_level and will return that index. This is
1320 * where new drives will be added. If the index to be returned is greater
1321 * than the highest_lun index for the controller then highest_lun is set
1322 * to this new index. If there are no available indexes then -1 is returned.
1323*/
1324static int cciss_find_free_drive_index(int ctlr)
1325{
1326 int i;
1327
1328 for (i=0; i < CISS_MAX_LUN; i++){
1329 if (hba[ctlr]->drv[i].raid_level == -1){
1330 if (i > hba[ctlr]->highest_lun)
1331 hba[ctlr]->highest_lun = i;
1332 return i;
1333 }
1334 }
1335 return -1;
1336}
1337
1338/* This function will add and remove logical drives from the Logical
1339 * drive array of the controller and maintain persistence of ordering
1340 * so that mount points are preserved until the next reboot. This allows
1341 * for the removal of logical drives in the middle of the drive array
1342 * without a re-ordering of those drives.
1343 * INPUT
1344 * h = The controller to perform the operations on
1345 * del_disk = The disk to remove if specified. If the value given
1346 * is NULL then no disk is removed.
1347*/
1348static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1349{
1350 int ctlr = h->ctlr;
1351 int num_luns;
1352 ReportLunData_struct *ld_buff = NULL;
1353 drive_info_struct *drv = NULL;
1354 int return_code;
1355 int listlength = 0;
1356 int i;
1357 int drv_found;
1358 int drv_index = 0;
1359 __u32 lunid = 0;
1360 unsigned long flags;
1361
1362 /* Set busy_configuring flag for this operation */
1363 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1364 if (h->num_luns >= CISS_MAX_LUN){
1365 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1366 return -EINVAL;
1367 }
1368
1369 if (h->busy_configuring){
1370 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1371 return -EBUSY;
1372 }
1373 h->busy_configuring = 1;
1374
1375 /* if del_disk is NULL then we are being called to add a new disk
1376 * and update the logical drive table. If it is not NULL then
1377 * we will check if the disk is in use or not.
1378 */
1379 if (del_disk != NULL){
1380 drv = get_drv(del_disk);
1381 drv->busy_configuring = 1;
1382 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1383 return_code = deregister_disk(del_disk, drv, 1);
1384 drv->busy_configuring = 0;
1385 h->busy_configuring = 0;
1386 return return_code;
1387 } else {
1388 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1389 if (!capable(CAP_SYS_RAWIO))
1390 return -EPERM;
1391
1392 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1393 if (ld_buff == NULL)
1394 goto mem_msg;
1395
1396 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1397 sizeof(ReportLunData_struct), 0, 0, 0,
1398 TYPE_CMD);
1399
1400 if (return_code == IO_OK){
1401 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1402 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1403 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1404 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1405 } else{ /* reading number of logical volumes failed */
1406 printk(KERN_WARNING "cciss: report logical volume"
1407 " command failed\n");
1408 listlength = 0;
1409 goto freeret;
1410 }
1411
1412 num_luns = listlength / 8; /* 8 bytes per entry */
1413 if (num_luns > CISS_MAX_LUN){
1414 num_luns = CISS_MAX_LUN;
1415 printk(KERN_WARNING "cciss: more luns configured"
1416 " on controller than can be handled by"
1417 " this driver.\n");
1418 }
1419
1420 /* Compare the controller's drive array to the driver's drive array.
1421 * Check for updates in the drive information and any new drives
1422 * on the controller.
1423 */
1424 for (i=0; i < num_luns; i++){
1425 int j;
1426
1427 drv_found = 0;
1428
1429 lunid = (0xff &
1430 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1431 lunid |= (0xff &
1432 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1433 lunid |= (0xff &
1434 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1435 lunid |= 0xff &
1436 (unsigned int)(ld_buff->LUN[i][0]);
1437
1438 /* Find if the LUN is already in the drive array
1439 * of the controller. If so then update its info
1440 * if not is use. If it does not exist then find
1441 * the first free index and add it.
1442 */
1443 for (j=0; j <= h->highest_lun; j++){
1444 if (h->drv[j].LunID == lunid){
1445 drv_index = j;
1446 drv_found = 1;
1447 }
1448 }
1449
1450 /* check if the drive was found already in the array */
1451 if (!drv_found){
1452 drv_index = cciss_find_free_drive_index(ctlr);
1453 if (drv_index == -1)
1454 goto freeret;
1455
1456 }
1457 h->drv[drv_index].LunID = lunid;
1458 cciss_update_drive_info(ctlr, drv_index);
1459 } /* end for */
1460 } /* end else */
1461
1462freeret:
1463 kfree(ld_buff);
1464 h->busy_configuring = 0;
1465 /* We return -1 here to tell the ACU that we have registered/updated
1466 * all of the drives that we can and to keep it from calling us
1467 * additional times.
1468 */
1469 return -1;
1470mem_msg:
1471 printk(KERN_ERR "cciss: out of memory\n");
1472 goto freeret;
1473}
1474
1475/* This function will deregister the disk and its queue from the
1476 * kernel. It must be called with the controller lock held and the
1477 * drv structure's busy_configuring flag set. Its parameters are:
1478 *
1479 * disk = This is the disk to be deregistered
1480 * drv = This is the drive_info_struct associated with the disk to be
1481 * deregistered. It contains information about the disk used
1482 * by the driver.
1483 * clear_all = This flag determines whether or not the disk information
1484 * is going to be completely cleared out and the highest_lun
1485 * reset. Sometimes we want to clear out information about
1486 * the disk in preparation for re-adding it. In this case
1487 * the highest_lun should be left unchanged and the LunID
1488 * should not be cleared.
1489*/
1490static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1491 int clear_all)
1492{
1493 ctlr_info_t *h = get_host(disk);
1494
1495 if (!capable(CAP_SYS_RAWIO))
1496 return -EPERM;
1497
1498 /* make sure logical volume is NOT in use */
1499 if(clear_all || (h->gendisk[0] == disk)) {
1500 if (drv->usage_count > 1)
1501 return -EBUSY;
1502 }
1503 else
1504 if( drv->usage_count > 0 )
1505 return -EBUSY;
1506
1507 /* invalidate the devices and deregister the disk. If it is disk
1508 * zero do not deregister it but just zero out its values. This
1509 * allows us to delete disk zero but keep the controller registered.
1510 */
1511 if (h->gendisk[0] != disk){
1512 if (disk) {
1513 request_queue_t *q = disk->queue;
1514 if (disk->flags & GENHD_FL_UP)
1515 del_gendisk(disk);
1516 if (q) {
1517 blk_cleanup_queue(q);
1518 drv->queue = NULL;
1519 }
1520 }
1521 }
1522
1523 --h->num_luns;
1524 /* zero out the disk size info */
1525 drv->nr_blocks = 0;
1526 drv->block_size = 0;
1527 drv->heads = 0;
1528 drv->sectors = 0;
1529 drv->cylinders = 0;
1530 drv->raid_level = -1; /* This can be used as a flag variable to
1531 * indicate that this element of the drive
1532 * array is free.
1533 */
1534
1535 if (clear_all){
1536 /* check to see if it was the last disk */
1537 if (drv == h->drv + h->highest_lun) {
1538 /* if so, find the new highest lun */
1539 int i, newhighest =-1;
1540 for(i=0; i<h->highest_lun; i++) {
1541 /* if the disk has size > 0, it is available */
1542 if (h->drv[i].heads)
1543 newhighest = i;
1544 }
1545 h->highest_lun = newhighest;
1546 }
1547
1548 drv->LunID = 0;
1549 }
1550 return(0);
1551}
1552
1553static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1554 size_t size,
1555 unsigned int use_unit_num, /* 0: address the controller,
1556 1: address logical volume log_unit,
1557 2: periph device address is scsi3addr */
1558 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1559 int cmd_type)
1560{
1561 ctlr_info_t *h= hba[ctlr];
1562 u64bit buff_dma_handle;
1563 int status = IO_OK;
1564
1565 c->cmd_type = CMD_IOCTL_PEND;
1566 c->Header.ReplyQueue = 0;
1567 if( buff != NULL) {
1568 c->Header.SGList = 1;
1569 c->Header.SGTotal= 1;
1570 } else {
1571 c->Header.SGList = 0;
1572 c->Header.SGTotal= 0;
1573 }
1574 c->Header.Tag.lower = c->busaddr;
1575
1576 c->Request.Type.Type = cmd_type;
1577 if (cmd_type == TYPE_CMD) {
1578 switch(cmd) {
1579 case CISS_INQUIRY:
1580 /* If the logical unit number is 0 then, this is going
1581 to controller so It's a physical command
1582 mode = 0 target = 0. So we have nothing to write.
1583 otherwise, if use_unit_num == 1,
1584 mode = 1(volume set addressing) target = LUNID
1585 otherwise, if use_unit_num == 2,
1586 mode = 0(periph dev addr) target = scsi3addr */
1587 if (use_unit_num == 1) {
1588 c->Header.LUN.LogDev.VolId=
1589 h->drv[log_unit].LunID;
1590 c->Header.LUN.LogDev.Mode = 1;
1591 } else if (use_unit_num == 2) {
1592 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1593 c->Header.LUN.LogDev.Mode = 0;
1594 }
1595 /* are we trying to read a vital product page */
1596 if(page_code != 0) {
1597 c->Request.CDB[1] = 0x01;
1598 c->Request.CDB[2] = page_code;
1599 }
1600 c->Request.CDBLen = 6;
1601 c->Request.Type.Attribute = ATTR_SIMPLE;
1602 c->Request.Type.Direction = XFER_READ;
1603 c->Request.Timeout = 0;
1604 c->Request.CDB[0] = CISS_INQUIRY;
1605 c->Request.CDB[4] = size & 0xFF;
1606 break;
1607 case CISS_REPORT_LOG:
1608 case CISS_REPORT_PHYS:
1609 /* Talking to controller so It's a physical command
1610 mode = 00 target = 0. Nothing to write.
1611 */
1612 c->Request.CDBLen = 12;
1613 c->Request.Type.Attribute = ATTR_SIMPLE;
1614 c->Request.Type.Direction = XFER_READ;
1615 c->Request.Timeout = 0;
1616 c->Request.CDB[0] = cmd;
1617 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1618 c->Request.CDB[7] = (size >> 16) & 0xFF;
1619 c->Request.CDB[8] = (size >> 8) & 0xFF;
1620 c->Request.CDB[9] = size & 0xFF;
1621 break;
1622
1623 case CCISS_READ_CAPACITY:
1624 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1625 c->Header.LUN.LogDev.Mode = 1;
1626 c->Request.CDBLen = 10;
1627 c->Request.Type.Attribute = ATTR_SIMPLE;
1628 c->Request.Type.Direction = XFER_READ;
1629 c->Request.Timeout = 0;
1630 c->Request.CDB[0] = cmd;
1631 break;
1632 case CCISS_CACHE_FLUSH:
1633 c->Request.CDBLen = 12;
1634 c->Request.Type.Attribute = ATTR_SIMPLE;
1635 c->Request.Type.Direction = XFER_WRITE;
1636 c->Request.Timeout = 0;
1637 c->Request.CDB[0] = BMIC_WRITE;
1638 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1639 break;
1640 default:
1641 printk(KERN_WARNING
1642 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1643 return(IO_ERROR);
1644 }
1645 } else if (cmd_type == TYPE_MSG) {
1646 switch (cmd) {
1647 case 0: /* ABORT message */
1648 c->Request.CDBLen = 12;
1649 c->Request.Type.Attribute = ATTR_SIMPLE;
1650 c->Request.Type.Direction = XFER_WRITE;
1651 c->Request.Timeout = 0;
1652 c->Request.CDB[0] = cmd; /* abort */
1653 c->Request.CDB[1] = 0; /* abort a command */
1654 /* buff contains the tag of the command to abort */
1655 memcpy(&c->Request.CDB[4], buff, 8);
1656 break;
1657 case 1: /* RESET message */
1658 c->Request.CDBLen = 12;
1659 c->Request.Type.Attribute = ATTR_SIMPLE;
1660 c->Request.Type.Direction = XFER_WRITE;
1661 c->Request.Timeout = 0;
1662 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1663 c->Request.CDB[0] = cmd; /* reset */
1664 c->Request.CDB[1] = 0x04; /* reset a LUN */
break; /* do not fall through into the No-Op case */
1665 case 3: /* No-Op message */
1666 c->Request.CDBLen = 1;
1667 c->Request.Type.Attribute = ATTR_SIMPLE;
1668 c->Request.Type.Direction = XFER_WRITE;
1669 c->Request.Timeout = 0;
1670 c->Request.CDB[0] = cmd;
1671 break;
1672 default:
1673 printk(KERN_WARNING
1674 "cciss%d: unknown message type %d\n",
1675 ctlr, cmd);
1676 return IO_ERROR;
1677 }
1678 } else {
1679 printk(KERN_WARNING
1680 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1681 return IO_ERROR;
1682 }
1683 /* Fill in the scatter gather information */
1684 if (size > 0) {
1685 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1686 buff, size, PCI_DMA_BIDIRECTIONAL);
1687 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1688 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1689 c->SG[0].Len = size;
1690 c->SG[0].Ext = 0; /* we are not chaining */
1691 }
1692 return status;
1693}
1694static int sendcmd_withirq(__u8 cmd,
1695 int ctlr,
1696 void *buff,
1697 size_t size,
1698 unsigned int use_unit_num,
1699 unsigned int log_unit,
1700 __u8 page_code,
1701 int cmd_type)
1702{
1703 ctlr_info_t *h = hba[ctlr];
1704 CommandList_struct *c;
1705 u64bit buff_dma_handle;
1706 unsigned long flags;
1707 int return_status;
1708 DECLARE_COMPLETION(wait);
1709
1710 if ((c = cmd_alloc(h , 0)) == NULL)
1711 return -ENOMEM;
1712 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1713 log_unit, page_code, NULL, cmd_type);
1714 if (return_status != IO_OK) {
1715 cmd_free(h, c, 0);
1716 return return_status;
1717 }
1718resend_cmd2:
1719 c->waiting = &wait;
1720
1721 /* Put the request on the tail of the queue and send it */
1722 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1723 addQ(&h->reqQ, c);
1724 h->Qdepth++;
1725 start_io(h);
1726 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1727
1728 wait_for_completion(&wait);
1729
1730 if(c->err_info->CommandStatus != 0)
1731 { /* an error has occurred */
1732 switch(c->err_info->CommandStatus)
1733 {
1734 case CMD_TARGET_STATUS:
1735 printk(KERN_WARNING "cciss: cmd %p has "
1736 " completed with errors\n", c);
1737 if( c->err_info->ScsiStatus)
1738 {
1739 printk(KERN_WARNING "cciss: cmd %p "
1740 "has SCSI Status = %x\n",
1741 c,
1742 c->err_info->ScsiStatus);
1743 }
1744
1745 break;
1746 case CMD_DATA_UNDERRUN:
1747 case CMD_DATA_OVERRUN:
1748 /* expected for inquire and report lun commands */
1749 break;
1750 case CMD_INVALID:
1751 printk(KERN_WARNING "cciss: Cmd %p is "
1752 "reported invalid\n", c);
1753 return_status = IO_ERROR;
1754 break;
1755 case CMD_PROTOCOL_ERR:
1756 printk(KERN_WARNING "cciss: cmd %p has "
1757 "protocol error \n", c);
1758 return_status = IO_ERROR;
1759 break;
1760 case CMD_HARDWARE_ERR:
1761 printk(KERN_WARNING "cciss: cmd %p had "
1762 " hardware error\n", c);
1763 return_status = IO_ERROR;
1764 break;
1765 case CMD_CONNECTION_LOST:
1766 printk(KERN_WARNING "cciss: cmd %p had "
1767 "connection lost\n", c);
1768 return_status = IO_ERROR;
1769 break;
1770 case CMD_ABORTED:
1771 printk(KERN_WARNING "cciss: cmd %p was "
1772 "aborted\n", c);
1773 return_status = IO_ERROR;
1774 break;
1775 case CMD_ABORT_FAILED:
1776 printk(KERN_WARNING "cciss: cmd %p reports "
1777 "abort failed\n", c);
1778 return_status = IO_ERROR;
1779 break;
1780 case CMD_UNSOLICITED_ABORT:
1781 printk(KERN_WARNING
1782 "cciss%d: unsolicited abort %p\n",
1783 ctlr, c);
1784 if (c->retry_count < MAX_CMD_RETRIES) {
1785 printk(KERN_WARNING
1786 "cciss%d: retrying %p\n",
1787 ctlr, c);
1788 c->retry_count++;
1789 /* erase the old error information */
1790 memset(c->err_info, 0,
1791 sizeof(ErrorInfo_struct));
1792 return_status = IO_OK;
1793 INIT_COMPLETION(wait);
1794 goto resend_cmd2;
1795 }
1796 return_status = IO_ERROR;
1797 break;
1798 default:
1799 printk(KERN_WARNING "cciss: cmd %p returned "
1800 "unknown status %x\n", c,
1801 c->err_info->CommandStatus);
1802 return_status = IO_ERROR;
1803 }
1804 }
1805 /* unlock the buffers from DMA */
1806 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1807 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1808 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1809 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1810 cmd_free(h, c, 0);
1811 return(return_status);
1812
1813}
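/*
 * Issue a vendor-specific inquiry (page 0xC1) to fetch the geometry and
 * RAID level of a logical volume.  If the volume cannot report geometry,
 * fall back to a fabricated 255-head, 32-sectors-per-track layout.
 */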
1814static void cciss_geometry_inquiry(int ctlr, int logvol,
1815 int withirq, unsigned int total_size,
1816 unsigned int block_size, InquiryData_struct *inq_buff,
1817 drive_info_struct *drv)
1818{
1819 int return_code;
1820 memset(inq_buff, 0, sizeof(InquiryData_struct));
1821 if (withirq)
1822 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1823 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1824 else
1825 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1826 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1827 if (return_code == IO_OK) {
1828 if(inq_buff->data_byte[8] == 0xFF) {
1829 printk(KERN_WARNING
1830 "cciss: reading geometry failed, volume "
1831 "does not support reading geometry\n");
1832 drv->block_size = block_size;
1833 drv->nr_blocks = total_size;
1834 drv->heads = 255;
1835 drv->sectors = 32; // Sectors per track
1836 drv->cylinders = total_size / 255 / 32;
1837 } else {
1838 unsigned int t;
1839
1840 drv->block_size = block_size;
1841 drv->nr_blocks = total_size;
1842 drv->heads = inq_buff->data_byte[6];
1843 drv->sectors = inq_buff->data_byte[7];
1844 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1845 drv->cylinders += inq_buff->data_byte[5];
1846 drv->raid_level = inq_buff->data_byte[8];
1847 t = drv->heads * drv->sectors;
1848 if (t > 1) {
1849 drv->cylinders = total_size/t;
1850 }
1851 }
1852 } else { /* Get geometry failed */
1853 printk(KERN_WARNING "cciss: reading geometry failed\n");
1854 }
1855 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1856 drv->heads, drv->sectors, drv->cylinders);
1857}
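/*
 * Issue READ CAPACITY for a logical volume and convert the big-endian
 * result into total blocks and block size.  On failure the size is
 * reported as zero blocks of the default BLOCK_SIZE.
 */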
1858static void
1859cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1860 int withirq, unsigned int *total_size, unsigned int *block_size)
1861{
1862 int return_code;
1863 memset(buf, 0, sizeof(*buf));
1864 if (withirq)
1865 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1866 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1867 else
1868 return_code = sendcmd(CCISS_READ_CAPACITY,
1869 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1870 if (return_code == IO_OK) {
1871 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1872 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1873 } else { /* read capacity command failed */
1874 printk(KERN_WARNING "cciss: read capacity failed\n");
1875 *total_size = 0;
1876 *block_size = BLOCK_SIZE;
1877 }
1878 printk(KERN_INFO " blocks= %u block_size= %d\n",
1879 *total_size, *block_size);
1880 return;
1881}
1882
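/*
 * Block-layer revalidate hook: look the drive up by LunID, re-read its
 * capacity and geometry (with interrupts enabled), and refresh the
 * queue's hardware sector size and the gendisk capacity.
 */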
1883static int cciss_revalidate(struct gendisk *disk)
1884{
1885 ctlr_info_t *h = get_host(disk);
1886 drive_info_struct *drv = get_drv(disk);
1887 int logvol;
1888 int FOUND=0;
1889 unsigned int block_size;
1890 unsigned int total_size;
1891 ReadCapdata_struct *size_buff = NULL;
1892 InquiryData_struct *inq_buff = NULL;
1893
1894 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1895 {
1896 if(h->drv[logvol].LunID == drv->LunID) {
1897 FOUND=1;
1898 break;
1899 }
1900 }
1901
1902 if (!FOUND) return 1;
1903
1904 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1905 if (size_buff == NULL)
1906 {
1907 printk(KERN_WARNING "cciss: out of memory\n");
1908 return 1;
1909 }
1910 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1911 if (inq_buff == NULL)
1912 {
1913 printk(KERN_WARNING "cciss: out of memory\n");
1914 kfree(size_buff);
1915 return 1;
1916 }
1917
1918 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1919 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1920
1921 	blk_queue_hardsect_size(drv->queue, drv->block_size);
1922 set_capacity(disk, drv->nr_blocks);
1923
1924 kfree(size_buff);
1925 kfree(inq_buff);
1926 return 0;
1927}
1928
1929/*
1930  * Wait (by polling) for a command to complete.
1931 * The memory mapped FIFO is polled for the completion.
1932 * Used only at init time, interrupts from the HBA are disabled.
1933 */
1934static unsigned long pollcomplete(int ctlr)
1935{
1936 unsigned long done;
1937 int i;
1938
1939 /* Wait (up to 20 seconds) for a command to complete */
1940
1941 for (i = 20 * HZ; i > 0; i--) {
1942 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1943 if (done == FIFO_EMPTY)
1944 schedule_timeout_uninterruptible(1);
1945 else
1946 return (done);
1947 }
1948 /* Invalid address to tell caller we ran out of time */
1949 return 1;
1950}
1951
1952static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1953{
1954 /* We get in here if sendcmd() is polling for completions
1955 and gets some command back that it wasn't expecting --
1956 something other than that which it just sent down.
1957 Ordinarily, that shouldn't happen, but it can happen when
1958 the scsi tape stuff gets into error handling mode, and
1959 starts using sendcmd() to try to abort commands and
1960 reset tape drives. In that case, sendcmd may pick up
1961 completions of commands that were sent to logical drives
1962 through the block i/o system, or cciss ioctls completing, etc.
1963 In that case, we need to save those completions for later
1964 processing by the interrupt handler.
1965 */
1966
1967#ifdef CONFIG_CISS_SCSI_TAPE
1968 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1969
1970 /* If it's not the scsi tape stuff doing error handling, (abort */
1971 /* or reset) then we don't expect anything weird. */
1972 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1973#endif
1974 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1975 "Invalid command list address returned! (%lx)\n",
1976 ctlr, complete);
1977 /* not much we can do. */
1978#ifdef CONFIG_CISS_SCSI_TAPE
1979 return 1;
1980 }
1981
1982 /* We've sent down an abort or reset, but something else
1983 has completed */
1984 if (srl->ncompletions >= (NR_CMDS + 2)) {
1985 /* Uh oh. No room to save it for later... */
1986 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1987 "reject list overflow, command lost!\n", ctlr);
1988 return 1;
1989 }
1990 /* Save it for later */
1991 srl->complete[srl->ncompletions] = complete;
1992 srl->ncompletions++;
1993#endif
1994 return 0;
1995}
1996
1997/*
1998 * Send a command to the controller, and wait for it to complete.
1999 * Only used at init time.
2000 */
2001static int sendcmd(
2002 __u8 cmd,
2003 int ctlr,
2004 void *buff,
2005 size_t size,
2006 unsigned int use_unit_num, /* 0: address the controller,
2007 1: address logical volume log_unit,
2008 2: periph device address is scsi3addr */
2009 unsigned int log_unit,
2010 __u8 page_code,
2011 unsigned char *scsi3addr,
2012 int cmd_type)
2013{
2014 CommandList_struct *c;
2015 int i;
2016 unsigned long complete;
2017 ctlr_info_t *info_p= hba[ctlr];
2018 u64bit buff_dma_handle;
2019 	int status, done = 0;
2020
2021 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2022 printk(KERN_WARNING "cciss: unable to get memory");
2023 return(IO_ERROR);
2024 }
2025 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2026 log_unit, page_code, scsi3addr, cmd_type);
2027 if (status != IO_OK) {
2028 cmd_free(info_p, c, 1);
2029 return status;
2030 }
2031resend_cmd1:
2032 /*
2033 * Disable interrupt
2034 */
2035#ifdef CCISS_DEBUG
2036 printk(KERN_DEBUG "cciss: turning intr off\n");
2037#endif /* CCISS_DEBUG */
2038 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2039
2040 /* Make sure there is room in the command FIFO */
2041 	/* Actually it should be completely empty at this time */
2042 	/* unless we are in here doing error handling for the scsi */
2043 	/* tape side of the driver. */
2044 for (i = 200000; i > 0; i--)
2045 {
2046 /* if fifo isn't full go */
2047 if (!(info_p->access.fifo_full(info_p)))
2048 {
2049
2050 break;
2051 }
2052 udelay(10);
2053 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2054 " waiting!\n", ctlr);
2055 }
2056 /*
2057 * Send the cmd
2058 */
2059 info_p->access.submit_command(info_p, c);
2060 	done = 0;
2061 	do {
2062 		complete = pollcomplete(ctlr);
2063
2064#ifdef CCISS_DEBUG
2065 		printk(KERN_DEBUG "cciss: command completed\n");
2066#endif /* CCISS_DEBUG */
2067
2068 		if (complete == 1) {
2069 			printk( KERN_WARNING
2070 				"cciss cciss%d: SendCmd timed out, "
2071 "No command list address returned!\n",
2072 ctlr);
2073 status = IO_ERROR;
2074 done = 1;
2075 break;
2076 }
2077
2078 /* This will need to change for direct lookup completions */
2079 if ( (complete & CISS_ERROR_BIT)
2080 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2081 {
2082 			/* if data overrun or underrun on Report command
2083 			   ignore it
2084 			*/
2085 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2086 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2087 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2088 ((c->err_info->CommandStatus ==
2089 CMD_DATA_OVERRUN) ||
2090 (c->err_info->CommandStatus ==
2091 CMD_DATA_UNDERRUN)
2092 ))
2093 {
2094 complete = c->busaddr;
2095 } else {
2096 if (c->err_info->CommandStatus ==
2097 CMD_UNSOLICITED_ABORT) {
2098 printk(KERN_WARNING "cciss%d: "
2099 "unsolicited abort %p\n",
2100 ctlr, c);
2101 if (c->retry_count < MAX_CMD_RETRIES) {
2102 printk(KERN_WARNING
2103 "cciss%d: retrying %p\n",
2104 ctlr, c);
2105 c->retry_count++;
2106 /* erase the old error */
2107 /* information */
2108 memset(c->err_info, 0,
2109 sizeof(ErrorInfo_struct));
2110 goto resend_cmd1;
2111 } else {
2112 printk(KERN_WARNING
2113 "cciss%d: retried %p too "
2114 "many times\n", ctlr, c);
2115 status = IO_ERROR;
2116 goto cleanup1;
2117 }
2118 			} else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2119 				printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2120 				status = IO_ERROR;
2121 				goto cleanup1;
2122 }
2123 			printk(KERN_WARNING "cciss cciss%d: sendcmd"
2124 " Error %x \n", ctlr,
2125 c->err_info->CommandStatus);
2126 			printk(KERN_WARNING "cciss cciss%d: sendcmd"
2127 " offensive info\n"
2128 " size %x\n num %x value %x\n", ctlr,
2129 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2130 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2131 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2132 status = IO_ERROR;
2133 goto cleanup1;
2134 }
2135 }
2136 		/* This will need changing for direct lookup completions */
2137 		if (complete != c->busaddr) {
2138 			if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2139 BUG(); /* we are pretty much hosed if we get here. */
2140 }
2141 continue;
2142 } else
2143 done = 1;
2144 } while (!done);
2145
2146cleanup1:
2147 /* unlock the data buffer from DMA */
2148 	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2149 	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2150 	pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2151 		c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2152#ifdef CONFIG_CISS_SCSI_TAPE
2153 /* if we saved some commands for later, process them now. */
2154 if (info_p->scsi_rejects.ncompletions > 0)
2155 do_cciss_intr(0, info_p, NULL);
2156#endif
2157 cmd_free(info_p, c, 1);
2158 return (status);
2159}
2160/*
2161 * Map (physical) PCI mem into (virtual) kernel space
2162 */
2163static void __iomem *remap_pci_mem(ulong base, ulong size)
2164{
2165 ulong page_base = ((ulong) base) & PAGE_MASK;
2166 ulong page_offs = ((ulong) base) - page_base;
2167 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2168
2169 return page_remapped ? (page_remapped + page_offs) : NULL;
2170}
2171
2172/*
2173  * Takes jobs off the request Q and sends them to the hardware, then puts
2174  * them on the completion Q to wait for completion.
2175 */
2176static void start_io( ctlr_info_t *h)
2177{
2178 CommandList_struct *c;
2179
2180 while(( c = h->reqQ) != NULL )
2181 {
2182 /* can't do anything if fifo is full */
2183 if ((h->access.fifo_full(h))) {
2184 printk(KERN_WARNING "cciss: fifo full\n");
2185 break;
2186 }
2187
2188 		/* Get the first entry from the Request Q */
2189 removeQ(&(h->reqQ), c);
2190 h->Qdepth--;
2191
2192 /* Tell the controller execute command */
2193 h->access.submit_command(h, c);
2194
2195 /* Put job onto the completed Q */
2196 addQ (&(h->cmpQ), c);
2197 }
2198}
2199/* Assumes that CCISS_LOCK(h->ctlr) is held. */
2200/* Zeros out the error record and then resends the command back */
2201/* to the controller */
2202static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2203{
2204 /* erase the old error information */
2205 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2206
2207 /* add it to software queue and then send it to the controller */
2208 addQ(&(h->reqQ),c);
2209 h->Qdepth++;
2210 if(h->Qdepth > h->maxQsinceinit)
2211 h->maxQsinceinit = h->Qdepth;
2212
2213 start_io(h);
2214}
2215
2216 /* checks the status of the job and calls complete buffers to mark all
2217 * buffers for the completed job. Note that this function does not need
2218 * to hold the hba/queue lock.
2219 */
2220static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2221 int timeout)
2222{
2223 int status = 1;
2224 	int retry_cmd = 0;
2225
2226 if (timeout)
2227 status = 0;
2228
2229 if(cmd->err_info->CommandStatus != 0)
2230 { /* an error has occurred */
2231 switch(cmd->err_info->CommandStatus)
2232 {
2233 unsigned char sense_key;
2234 case CMD_TARGET_STATUS:
2235 status = 0;
2236
2237 if( cmd->err_info->ScsiStatus == 0x02)
2238 {
2239 printk(KERN_WARNING "cciss: cmd %p "
2240 "has CHECK CONDITION "
2241 " byte 2 = 0x%x\n", cmd,
2242 cmd->err_info->SenseInfo[2]
2243 );
2244 /* check the sense key */
2245 sense_key = 0xf &
2246 cmd->err_info->SenseInfo[2];
2247 /* no status or recovered error */
2248 if((sense_key == 0x0) ||
2249 (sense_key == 0x1))
2250 {
2251 status = 1;
2252 }
2253 } else
2254 {
2255 printk(KERN_WARNING "cciss: cmd %p "
2256 "has SCSI Status 0x%x\n",
2257 cmd, cmd->err_info->ScsiStatus);
2258 }
2259 break;
2260 case CMD_DATA_UNDERRUN:
2261 printk(KERN_WARNING "cciss: cmd %p has"
2262 " completed with data underrun "
2263 "reported\n", cmd);
2264 break;
2265 case CMD_DATA_OVERRUN:
2266 printk(KERN_WARNING "cciss: cmd %p has"
2267 " completed with data overrun "
2268 "reported\n", cmd);
2269 break;
2270 case CMD_INVALID:
2271 printk(KERN_WARNING "cciss: cmd %p is "
2272 "reported invalid\n", cmd);
2273 status = 0;
2274 break;
2275 case CMD_PROTOCOL_ERR:
2276 printk(KERN_WARNING "cciss: cmd %p has "
2277 "protocol error \n", cmd);
2278 status = 0;
2279 break;
2280 case CMD_HARDWARE_ERR:
2281 printk(KERN_WARNING "cciss: cmd %p had "
2282 " hardware error\n", cmd);
2283 status = 0;
2284 break;
2285 case CMD_CONNECTION_LOST:
2286 printk(KERN_WARNING "cciss: cmd %p had "
2287 "connection lost\n", cmd);
2288 status=0;
2289 break;
2290 case CMD_ABORTED:
2291 printk(KERN_WARNING "cciss: cmd %p was "
2292 "aborted\n", cmd);
2293 status=0;
2294 break;
2295 case CMD_ABORT_FAILED:
2296 printk(KERN_WARNING "cciss: cmd %p reports "
2297 "abort failed\n", cmd);
2298 status=0;
2299 break;
2300 case CMD_UNSOLICITED_ABORT:
2301 printk(KERN_WARNING "cciss%d: unsolicited "
2302 "abort %p\n", h->ctlr, cmd);
2303 if (cmd->retry_count < MAX_CMD_RETRIES) {
2304 retry_cmd=1;
2305 printk(KERN_WARNING
2306 "cciss%d: retrying %p\n",
2307 h->ctlr, cmd);
2308 cmd->retry_count++;
2309 } else
2310 printk(KERN_WARNING
2311 "cciss%d: %p retried too "
2312 "many times\n", h->ctlr, cmd);
2313 status=0;
2314 break;
2315 case CMD_TIMEOUT:
2316 printk(KERN_WARNING "cciss: cmd %p timedout\n",
2317 cmd);
2318 status=0;
2319 break;
2320 default:
2321 printk(KERN_WARNING "cciss: cmd %p returned "
2322 "unknown status %x\n", cmd,
2323 cmd->err_info->CommandStatus);
2324 status=0;
2325 }
2326 }
2327 /* We need to return this command */
2328 if(retry_cmd) {
2329 resend_cciss_cmd(h,cmd);
2330 return;
2331 }
2332
2333 cmd->rq->completion_data = cmd;
2334 cmd->rq->errors = status;
2335 	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2336 	blk_complete_request(cmd->rq);
2337}
2338
2339/*
2340 * Get a request and submit it to the controller.
2341 */
2342static void do_cciss_request(request_queue_t *q)
2343{
2344 ctlr_info_t *h= q->queuedata;
2345 CommandList_struct *c;
2346 int start_blk, seg;
2347 struct request *creq;
2348 u64bit temp64;
2349 struct scatterlist tmp_sg[MAXSGENTRIES];
2350 drive_info_struct *drv;
2351 int i, dir;
2352
2353 /* We call start_io here in case there is a command waiting on the
2354 * queue that has not been sent.
2355 */
2356 if (blk_queue_plugged(q))
2357 goto startio;
2358
2359queue:
2360 creq = elv_next_request(q);
2361 if (!creq)
2362 goto startio;
2363
2364 	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2365
2366 if (( c = cmd_alloc(h, 1)) == NULL)
2367 goto full;
2368
2369 blkdev_dequeue_request(creq);
2370
2371 spin_unlock_irq(q->queue_lock);
2372
2373 c->cmd_type = CMD_RWREQ;
2374 c->rq = creq;
2375
2376 /* fill in the request */
2377 drv = creq->rq_disk->private_data;
2378 c->Header.ReplyQueue = 0; // unused in simple mode
2379 /* got command from pool, so use the command block index instead */
2380 /* for direct lookups. */
2381 /* The first 2 bits are reserved for controller error reporting. */
2382 c->Header.Tag.lower = (c->cmdindex << 3);
2383 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2384 c->Header.LUN.LogDev.VolId= drv->LunID;
2385 c->Header.LUN.LogDev.Mode = 1;
2386 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2387 c->Request.Type.Type = TYPE_CMD; // It is a command.
2388 c->Request.Type.Attribute = ATTR_SIMPLE;
2389 c->Request.Type.Direction =
2390 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2391 c->Request.Timeout = 0; // Don't time out
2392 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2393 start_blk = creq->sector;
2394#ifdef CCISS_DEBUG
2395 	printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int) creq->sector,
2396 (int) creq->nr_sectors);
2397#endif /* CCISS_DEBUG */
2398
2399 seg = blk_rq_map_sg(q, creq, tmp_sg);
2400
2401 /* get the DMA records for the setup */
2402 if (c->Request.Type.Direction == XFER_READ)
2403 dir = PCI_DMA_FROMDEVICE;
2404 else
2405 dir = PCI_DMA_TODEVICE;
2406
2407 for (i=0; i<seg; i++)
2408 {
2409 c->SG[i].Len = tmp_sg[i].length;
2410 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2411 tmp_sg[i].offset, tmp_sg[i].length,
2412 dir);
2413 c->SG[i].Addr.lower = temp64.val32.lower;
2414 c->SG[i].Addr.upper = temp64.val32.upper;
2415 c->SG[i].Ext = 0; // we are not chaining
2416 }
2417 /* track how many SG entries we are using */
2418 if( seg > h->maxSG)
2419 h->maxSG = seg;
2420
2421#ifdef CCISS_DEBUG
2422 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2423#endif /* CCISS_DEBUG */
2424
2425 c->Header.SGList = c->Header.SGTotal = seg;
2426 c->Request.CDB[1]= 0;
2427 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2428 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2429 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2430 c->Request.CDB[5]= start_blk & 0xff;
2431 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2432 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2433 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2434 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2435
2436 spin_lock_irq(q->queue_lock);
2437
2438 addQ(&(h->reqQ),c);
2439 h->Qdepth++;
2440 if(h->Qdepth > h->maxQsinceinit)
2441 h->maxQsinceinit = h->Qdepth;
2442
2443 goto queue;
2444full:
2445 blk_stop_queue(q);
2446startio:
2447 	/* We will already have the driver lock here so there is no need
2448 	 * to lock it.
2449 */
2450 start_io(h);
2451}
2452
2453 static inline unsigned long get_next_completion(ctlr_info_t *h)
2454{
2455#ifdef CONFIG_CISS_SCSI_TAPE
2456 /* Any rejects from sendcmd() lying around? Process them first */
2457 if (h->scsi_rejects.ncompletions == 0)
2458 return h->access.command_completed(h);
2459 else {
2460 struct sendcmd_reject_list *srl;
2461 int n;
2462 srl = &h->scsi_rejects;
2463 n = --srl->ncompletions;
2464 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2465 printk("p");
2466 return srl->complete[n];
2467 }
2468#else
2469 return h->access.command_completed(h);
2470#endif
2471}
2472
2473static inline int interrupt_pending(ctlr_info_t *h)
2474{
2475#ifdef CONFIG_CISS_SCSI_TAPE
2476 return ( h->access.intr_pending(h)
2477 || (h->scsi_rejects.ncompletions > 0));
2478#else
2479 return h->access.intr_pending(h);
2480#endif
2481}
2482
2483static inline long interrupt_not_for_us(ctlr_info_t *h)
2484{
2485#ifdef CONFIG_CISS_SCSI_TAPE
2486 return (((h->access.intr_pending(h) == 0) ||
2487 (h->interrupts_enabled == 0))
2488 && (h->scsi_rejects.ncompletions == 0));
2489#else
2490 return (((h->access.intr_pending(h) == 0) ||
2491 (h->interrupts_enabled == 0)));
2492#endif
2493}
2494
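/*
 * Interrupt handler.  Drains the completion FIFO (plus any completions
 * saved by sendcmd), matches each tag to its command either by direct
 * index lookup or by walking cmpQ, completes block, ioctl and SCSI-tape
 * commands, and then restarts the per-volume request queues round-robin
 * while free command-pool slots remain.
 */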
2495static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2496{
2497 ctlr_info_t *h = dev_id;
2498 CommandList_struct *c;
2499 unsigned long flags;
2500 	__u32 a, a1, a2;
2501 int j;
2502 int start_queue = h->next_to_run;
2503
2504 	if (interrupt_not_for_us(h))
2505 		return IRQ_NONE;
2506 /*
2507 * If there are completed commands in the completion queue,
2508 * we had better do something about it.
2509 */
2510 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2511 	while (interrupt_pending(h)) {
2512 		while((a = get_next_completion(h)) != FIFO_EMPTY) {
2513 			a1 = a;
2514 if ((a & 0x04)) {
2515 a2 = (a >> 3);
2516 if (a2 >= NR_CMDS) {
2517 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2518 fail_all_cmds(h->ctlr);
2519 return IRQ_HANDLED;
2520 }
2521
2522 c = h->cmd_pool + a2;
2523 a = c->busaddr;
2524
2525 } else {
2526 				a &= ~3;
2527 if ((c = h->cmpQ) == NULL) {
2528 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2529 continue;
2530 }
2531 while(c->busaddr != a) {
2532 c = c->next;
2533 if (c == h->cmpQ)
2534 break;
2535 }
2536 			}
2537 /*
2538 * If we've found the command, take it off the
2539 * completion Q and free it
2540 */
2541 if (c->busaddr == a) {
2542 removeQ(&h->cmpQ, c);
2543 if (c->cmd_type == CMD_RWREQ) {
2544 complete_command(h, c, 0);
2545 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2546 complete(c->waiting);
2547 }
2548# ifdef CONFIG_CISS_SCSI_TAPE
2549 else if (c->cmd_type == CMD_SCSI)
2550 complete_scsi_command(c, 0, a1);
2551# endif
2552 continue;
2553 }
2554 }
2555 }
2556
2557 /* check to see if we have maxed out the number of commands that can
2558 * be placed on the queue. If so then exit. We do this check here
2559 * in case the interrupt we serviced was from an ioctl and did not
2560 * free any new commands.
2561 */
2562 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2563 goto cleanup;
2564
2565 /* We have room on the queue for more commands. Now we need to queue
2566 * them up. We will also keep track of the next queue to run so
2567 * that every queue gets a chance to be started first.
2568 */
2569 for (j=0; j < h->highest_lun + 1; j++){
2570 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2571 /* make sure the disk has been added and the drive is real
2572 * because this can be called from the middle of init_one.
2573 */
2574 		if(!(h->drv[curr_queue].queue) ||
2575 !(h->drv[curr_queue].heads))
2576 continue;
2577 blk_start_queue(h->gendisk[curr_queue]->queue);
2578
2579 /* check to see if we have maxed out the number of commands
2580 * that can be placed on the queue.
2581 */
2582 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2583 {
2584 if (curr_queue == start_queue){
2585 				h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2586 goto cleanup;
2587 } else {
2588 h->next_to_run = curr_queue;
2589 goto cleanup;
2590 }
2591 } else {
2592 			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2593 }
2594 }
2595
2596cleanup:
2597 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2598 return IRQ_HANDLED;
2599}
2600/*
2601 * We cannot read the structure directly, for portability we must use
2602 * the io functions.
2603 * This is for debug only.
2604 */
2605#ifdef CCISS_DEBUG
2606static void print_cfg_table( CfgTable_struct *tb)
2607{
2608 int i;
2609 char temp_name[17];
2610
2611 printk("Controller Configuration information\n");
2612 printk("------------------------------------\n");
2613 for(i=0;i<4;i++)
2614 temp_name[i] = readb(&(tb->Signature[i]));
2615 temp_name[4]='\0';
2616 printk(" Signature = %s\n", temp_name);
2617 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2618 printk(" Transport methods supported = 0x%x\n",
2619 readl(&(tb-> TransportSupport)));
2620 printk(" Transport methods active = 0x%x\n",
2621 readl(&(tb->TransportActive)));
2622 printk(" Requested transport Method = 0x%x\n",
2623 readl(&(tb->HostWrite.TransportRequest)));
2624 	printk(" Coalesce Interrupt Delay = 0x%x\n",
2625 readl(&(tb->HostWrite.CoalIntDelay)));
2626 	printk(" Coalesce Interrupt Count = 0x%x\n",
2627 readl(&(tb->HostWrite.CoalIntCount)));
2628 	printk(" Max outstanding commands = %d\n",
2629 readl(&(tb->CmdsOutMax)));
2630 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2631 for(i=0;i<16;i++)
2632 temp_name[i] = readb(&(tb->ServerName[i]));
2633 temp_name[16] = '\0';
2634 printk(" Server Name = %s\n", temp_name);
2635 printk(" Heartbeat Counter = 0x%x\n\n\n",
2636 readl(&(tb->HeartBeat)));
2637}
2638#endif /* CCISS_DEBUG */
2639
2640static void release_io_mem(ctlr_info_t *c)
2641{
2642 /* if IO mem was not protected do nothing */
2643 if( c->io_mem_addr == 0)
2644 return;
2645 release_region(c->io_mem_addr, c->io_mem_length);
2646 c->io_mem_addr = 0;
2647 c->io_mem_length = 0;
2648}
2649
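/*
 * Translate a config-space base address register offset into a PCI
 * resource index, accounting for 64-bit memory BARs that occupy two
 * 32-bit register slots.  Returns -1 if the offset cannot be matched.
 */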
2650static int find_PCI_BAR_index(struct pci_dev *pdev,
2651 unsigned long pci_bar_addr)
2652{
2653 int i, offset, mem_type, bar_type;
2654 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2655 return 0;
2656 offset = 0;
2657 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2658 bar_type = pci_resource_flags(pdev, i) &
2659 PCI_BASE_ADDRESS_SPACE;
2660 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2661 offset += 4;
2662 else {
2663 mem_type = pci_resource_flags(pdev, i) &
2664 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2665 switch (mem_type) {
2666 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2667 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2668 offset += 4; /* 32 bit */
2669 break;
2670 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2671 offset += 8;
2672 break;
2673 default: /* reserved in PCI 2.2 */
2674 printk(KERN_WARNING "Base address is invalid\n");
2675 return -1;
2676 break;
2677 }
2678 }
2679 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2680 return i+1;
2681 }
2682 return -1;
2683}
2684
2685/* If MSI/MSI-X is supported by the kernel we will try to enable it on
2686 * controllers that are capable. If not, we use IO-APIC mode.
2687 */
2688
2689static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2690{
2691#ifdef CONFIG_PCI_MSI
2692 int err;
2693 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2694 {0,2}, {0,3}};
2695
2696 /* Some boards advertise MSI but don't really support it */
2697 if ((board_id == 0x40700E11) ||
2698 (board_id == 0x40800E11) ||
2699 (board_id == 0x40820E11) ||
2700 (board_id == 0x40830E11))
2701 goto default_int_mode;
2702
2703 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2704 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2705 if (!err) {
2706 c->intr[0] = cciss_msix_entries[0].vector;
2707 c->intr[1] = cciss_msix_entries[1].vector;
2708 c->intr[2] = cciss_msix_entries[2].vector;
2709 c->intr[3] = cciss_msix_entries[3].vector;
2710 c->msix_vector = 1;
2711 return;
2712 }
2713 if (err > 0) {
2714 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2715 "available\n", err);
2716 } else {
2717 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2718 err);
2719 }
2720 }
2721 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2722 if (!pci_enable_msi(pdev)) {
2723 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2724 c->msi_vector = 1;
2725 return;
2726 } else {
2727 printk(KERN_WARNING "cciss: MSI init failed\n");
2728 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2729 return;
2730 }
2731 }
2732default_int_mode:
2733#endif /* CONFIG_PCI_MSI */
2734 /* if we get here we're going to use the default interrupt mode */
2735 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2736 return;
2737}
2738
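/*
 * Per-controller PCI initialization: verify the device is enabled,
 * reserve its I/O range, select MSI-X/MSI/IO-APIC interrupt mode, map
 * the registers and config table, wait for the firmware-ready flag,
 * match the board ID against the product table, and switch the
 * controller into simple transport mode.
 */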
2739static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2740{
2741 ushort subsystem_vendor_id, subsystem_device_id, command;
2742 __u32 board_id, scratchpad = 0;
2743 __u64 cfg_offset;
2744 __u32 cfg_base_addr;
2745 __u64 cfg_base_addr_index;
2746 int i;
2747
2748 /* check to see if controller has been disabled */
2749 /* BEFORE trying to enable it */
2750 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2751 if(!(command & 0x02))
2752 {
2753 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2754 return(-1);
2755 }
2756
2757 if (pci_enable_device(pdev))
2758 {
2759 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2760 return( -1);
2761 }
2762
2763 subsystem_vendor_id = pdev->subsystem_vendor;
2764 subsystem_device_id = pdev->subsystem_device;
2765 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2766 subsystem_vendor_id);
2767
2768 /* search for our IO range so we can protect it */
2769 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2770 {
2771 /* is this an IO range */
2772 if( pci_resource_flags(pdev, i) & 0x01 ) {
2773 c->io_mem_addr = pci_resource_start(pdev, i);
2774 c->io_mem_length = pci_resource_end(pdev, i) -
2775 pci_resource_start(pdev, i) +1;
2776#ifdef CCISS_DEBUG
2777 printk("IO value found base_addr[%d] %lx %lx\n", i,
2778 c->io_mem_addr, c->io_mem_length);
2779#endif /* CCISS_DEBUG */
2780 /* register the IO range */
2781 if(!request_region( c->io_mem_addr,
2782 c->io_mem_length, "cciss"))
2783 {
2784 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2785 c->io_mem_addr, c->io_mem_length);
2786 c->io_mem_addr= 0;
2787 c->io_mem_length = 0;
2788 }
2789 break;
2790 }
2791 }
2792
2793#ifdef CCISS_DEBUG
2794 printk("command = %x\n", command);
2795 printk("irq = %x\n", pdev->irq);
2796 printk("board_id = %x\n", board_id);
2797#endif /* CCISS_DEBUG */
2798
2799/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2800 * else we use the IO-APIC interrupt assigned to us by system ROM.
2801 */
2802 cciss_interrupt_mode(c, pdev, board_id);
2803
2804 /*
2805 * Memory base addr is first addr , the second points to the config
2806 * table
2807 */
2808
2809 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2810#ifdef CCISS_DEBUG
2811 printk("address 0 = %x\n", c->paddr);
2812#endif /* CCISS_DEBUG */
2813 c->vaddr = remap_pci_mem(c->paddr, 200);
2814
2815 /* Wait for the board to become ready. (PCI hotplug needs this.)
2816 * We poll for up to 120 secs, once per 100ms. */
2817 for (i=0; i < 1200; i++) {
2818 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2819 if (scratchpad == CCISS_FIRMWARE_READY)
2820 break;
2821 set_current_state(TASK_INTERRUPTIBLE);
2822 schedule_timeout(HZ / 10); /* wait 100ms */
2823 }
2824 if (scratchpad != CCISS_FIRMWARE_READY) {
2825 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2826 return -1;
2827 }
2828
2829 /* get the address index number */
2830 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2831 cfg_base_addr &= (__u32) 0x0000ffff;
2832#ifdef CCISS_DEBUG
2833 printk("cfg base address = %x\n", cfg_base_addr);
2834#endif /* CCISS_DEBUG */
2835 cfg_base_addr_index =
2836 find_PCI_BAR_index(pdev, cfg_base_addr);
2837#ifdef CCISS_DEBUG
2838 printk("cfg base address index = %x\n", cfg_base_addr_index);
2839#endif /* CCISS_DEBUG */
2840 if (cfg_base_addr_index == -1) {
2841 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2842 release_io_mem(c);
2843 return -1;
2844 }
2845
2846 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2847#ifdef CCISS_DEBUG
2848 printk("cfg offset = %x\n", cfg_offset);
2849#endif /* CCISS_DEBUG */
2850 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2851 cfg_base_addr_index) + cfg_offset,
2852 sizeof(CfgTable_struct));
2853 c->board_id = board_id;
2854
2855#ifdef CCISS_DEBUG
2856 	print_cfg_table(c->cfgtable);
2857#endif /* CCISS_DEBUG */
2858
2859 for(i=0; i<NR_PRODUCTS; i++) {
2860 if (board_id == products[i].board_id) {
2861 c->product_name = products[i].product_name;
2862 c->access = *(products[i].access);
2863 break;
2864 }
2865 }
2866 if (i == NR_PRODUCTS) {
2867 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2868 " to access the Smart Array controller %08lx\n",
2869 (unsigned long)board_id);
2870 return -1;
2871 }
2872 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2873 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2874 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2875 (readb(&c->cfgtable->Signature[3]) != 'S') )
2876 {
2877 printk("Does not appear to be a valid CISS config table\n");
2878 return -1;
2879 }
2880
2881#ifdef CONFIG_X86
2882{
2883 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2884 __u32 prefetch;
2885 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2886 prefetch |= 0x100;
2887 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2888}
2889#endif
2890
2891#ifdef CCISS_DEBUG
2892 printk("Trying to put board into Simple mode\n");
2893#endif /* CCISS_DEBUG */
2894 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2895 /* Update the field, and then ring the doorbell */
2896 writel( CFGTBL_Trans_Simple,
2897 &(c->cfgtable->HostWrite.TransportRequest));
2898 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2899
2900 /* under certain very rare conditions, this can take awhile.
2901 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2902 * as we enter this code.) */
2903 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2904 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2905 break;
2906 /* delay and try again */
2907 set_current_state(TASK_INTERRUPTIBLE);
2908 schedule_timeout(10);
2909 }
2910
2911#ifdef CCISS_DEBUG
2912 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2913#endif /* CCISS_DEBUG */
2914#ifdef CCISS_DEBUG
2915 print_cfg_table(c->cfgtable);
2916#endif /* CCISS_DEBUG */
2917
2918 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2919 {
2920 printk(KERN_WARNING "cciss: unable to get board into"
2921 " simple mode\n");
2922 return -1;
2923 }
2924 return 0;
2925
2926}
2927
2928/*
2929 * Gets information about the local volumes attached to the controller.
2930 */
2931static void cciss_getgeometry(int cntl_num)
2932{
2933 ReportLunData_struct *ld_buff;
2934 ReadCapdata_struct *size_buff;
2935 InquiryData_struct *inq_buff;
2936 int return_code;
2937 int i;
2938 int listlength = 0;
2939 __u32 lunid = 0;
2940 int block_size;
2941 int total_size;
2942
2943 	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2944 if (ld_buff == NULL)
2945 {
2946 printk(KERN_ERR "cciss: out of memory\n");
2947 return;
2948 }
2949 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2950 if (size_buff == NULL)
2951 {
2952 printk(KERN_ERR "cciss: out of memory\n");
2953 kfree(ld_buff);
2954 return;
2955 }
2956 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2957 if (inq_buff == NULL)
2958 {
2959 printk(KERN_ERR "cciss: out of memory\n");
2960 kfree(ld_buff);
2961 kfree(size_buff);
2962 return;
2963 }
2964 /* Get the firmware version */
2965 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2966 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2967 if (return_code == IO_OK)
2968 {
2969 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2970 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2971 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2972 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2973 } else /* send command failed */
2974 {
2975 printk(KERN_WARNING "cciss: unable to determine firmware"
2976 " version of controller\n");
2977 }
2978 /* Get the number of logical volumes */
2979 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2980 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2981
2982 if( return_code == IO_OK)
2983 {
2984#ifdef CCISS_DEBUG
2985 printk("LUN Data\n--------------------------\n");
2986#endif /* CCISS_DEBUG */
2987
2988 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2989 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2990 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2991 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2992 } else /* reading number of logical volumes failed */
2993 {
2994 printk(KERN_WARNING "cciss: report logical volume"
2995 " command failed\n");
2996 listlength = 0;
2997 }
2998 	hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2999 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
3000 {
3001 		printk(KERN_ERR "cciss: only %d logical volumes are supported\n",
3002 CISS_MAX_LUN);
3003 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3004 }
3005#ifdef CCISS_DEBUG
3006 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3007 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3008 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3009#endif /* CCISS_DEBUG */
3010
3011 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
3012// for(i=0; i< hba[cntl_num]->num_luns; i++)
3013 for(i=0; i < CISS_MAX_LUN; i++)
3014 	{
3015 if (i < hba[cntl_num]->num_luns){
3016 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3017 << 24;
3018 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3019 << 16;
3020 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3021 << 8;
3022 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3023
3024 hba[cntl_num]->drv[i].LunID = lunid;
3025
3026
3027#ifdef CCISS_DEBUG
3028 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3029 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3030 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3031 hba[cntl_num]->drv[i].LunID);
3032#endif /* CCISS_DEBUG */
3033 cciss_read_capacity(cntl_num, i, size_buff, 0,
3034 &total_size, &block_size);
3035 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3036 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3037 } else {
3038 /* initialize raid_level to indicate a free space */
3039 hba[cntl_num]->drv[i].raid_level = -1;
3040 }
3041 }
3042 kfree(ld_buff);
3043 kfree(size_buff);
3044 kfree(inq_buff);
3045}
3046
3047/* Function to find the first free pointer into our hba[] array */
3048/* Returns -1 if no free entries are left. */
3049static int alloc_cciss_hba(void)
3050{
3051 struct gendisk *disk[NWD];
3052 int i, n;
3053 for (n = 0; n < NWD; n++) {
3054 disk[n] = alloc_disk(1 << NWD_SHIFT);
3055 if (!disk[n])
3056 goto out;
3057 }
3058
3059 for(i=0; i< MAX_CTLR; i++) {
3060 if (!hba[i]) {
3061 ctlr_info_t *p;
3062 			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3063 if (!p)
3064 goto Enomem;
3065 for (n = 0; n < NWD; n++)
3066 p->gendisk[n] = disk[n];
3067 hba[i] = p;
3068 return i;
3069 }
3070 }
3071 printk(KERN_WARNING "cciss: This driver supports a maximum"
3072 " of %d controllers.\n", MAX_CTLR);
3073 goto out;
3074Enomem:
3075 printk(KERN_ERR "cciss: out of memory.\n");
3076out:
3077 while (n--)
3078 put_disk(disk[n]);
3079 return -1;
3080}
3081
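/* Release the gendisks and the controller structure allocated by
 * alloc_cciss_hba().
 */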
3082static void free_hba(int i)
3083{
3084 ctlr_info_t *p = hba[i];
3085 int n;
3086
3087 hba[i] = NULL;
3088 for (n = 0; n < NWD; n++)
3089 put_disk(p->gendisk[n]);
3090 kfree(p);
3091}
3092
3093/*
3094 * This is it. Find all the controllers and register them. I really hate
3095 * stealing all these major device numbers.
3096 * returns the number of block devices registered.
3097 */
3098static int __devinit cciss_init_one(struct pci_dev *pdev,
3099 const struct pci_device_id *ent)
3100{
3101 request_queue_t *q;
3102 int i;
3103 int j;
3104 int rc;
3105
3106 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3107 " bus %d dev %d func %d\n",
3108 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3109 PCI_FUNC(pdev->devfn));
3110 i = alloc_cciss_hba();
3111 if(i < 0)
3112 return (-1);
3113
3114 hba[i]->busy_initializing = 1;
3115
3116 if (cciss_pci_init(hba[i], pdev) != 0)
3117 goto clean1;
3118
3119 sprintf(hba[i]->devname, "cciss%d", i);
3120 hba[i]->ctlr = i;
3121 hba[i]->pdev = pdev;
3122
3123 /* configure PCI DMA stuff */
3124 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3125 		printk("cciss: using DAC cycles\n");
3126 	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3127 printk("cciss: not using DAC cycles\n");
3128 else {
3129 printk("cciss: no suitable DMA available\n");
3130 goto clean1;
3131 }
3132
3133 /*
3134 * register with the major number, or get a dynamic major number
3135 * by passing 0 as argument. This is done for greater than
3136 * 8 controller support.
3137 */
3138 if (i < MAX_CTLR_ORIG)
3139 		hba[i]->major = COMPAQ_CISS_MAJOR + i;
3140 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3141 if(rc == -EBUSY || rc == -EINVAL) {
3142 printk(KERN_ERR
3143 "cciss: Unable to get major number %d for %s "
3144 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3145 goto clean1;
3146 }
3147 else {
3148 if (i >= MAX_CTLR_ORIG)
3149 hba[i]->major = rc;
3150 }
3151
3152 /* make sure the board interrupts are off */
3153 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3154 	if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3155 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3156 hba[i]->devname, hba[i])) {
3157 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3158 			hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3159 goto clean2;
3160 }
3161 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3162 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3163 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3164 &(hba[i]->cmd_pool_dhandle));
3165 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3166 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3167 &(hba[i]->errinfo_pool_dhandle));
3168 if((hba[i]->cmd_pool_bits == NULL)
3169 || (hba[i]->cmd_pool == NULL)
3170 || (hba[i]->errinfo_pool == NULL)) {
3171 printk( KERN_ERR "cciss: out of memory");
3172 goto clean4;
3173 }
3174#ifdef CONFIG_CISS_SCSI_TAPE
3175 hba[i]->scsi_rejects.complete =
3176 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3177 (NR_CMDS + 5), GFP_KERNEL);
3178 if (hba[i]->scsi_rejects.complete == NULL) {
3179 printk( KERN_ERR "cciss: out of memory");
3180 goto clean4;
3181 }
3182#endif
3183 	spin_lock_init(&hba[i]->lock);
3184
3185 /* Initialize the pdev driver private data.
3186 have it point to hba[i]. */
3187 pci_set_drvdata(pdev, hba[i]);
3188 /* command and error info recs zeroed out before
3189 they are used */
3190 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3191
3192#ifdef CCISS_DEBUG
3193 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3194#endif /* CCISS_DEBUG */
3195
3196 cciss_getgeometry(i);
3197
3198 cciss_scsi_setup(i);
3199
3200 /* Turn the interrupts on so we can service requests */
3201 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3202
3203 cciss_procinit(i);
3204 	hba[i]->busy_initializing = 0;
3205
3206 for(j=0; j < NWD; j++) { /* mfm */
3207 drive_info_struct *drv = &(hba[i]->drv[j]);
3208 struct gendisk *disk = hba[i]->gendisk[j];
3209
3210 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3211 if (!q) {
3212 printk(KERN_ERR
3213 "cciss: unable to allocate queue for disk %d\n",
3214 j);
3215 break;
3216 }
3217 drv->queue = q;
3218
3219 q->backing_dev_info.ra_pages = READ_AHEAD;
3220 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3221
3222 /* This is a hardware imposed limit. */
3223 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3224
3225 		/* This is a limit in the driver and could be eliminated. */
3226 		blk_queue_max_phys_segments(q, MAXSGENTRIES);
3227
3228 		blk_queue_max_sectors(q, 512);
3229
3230 		blk_queue_softirq_done(q, cciss_softirq_done);
3231
3232 		q->queuedata = hba[i];
3233 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3234 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3235 disk->major = hba[i]->major;
3236 disk->first_minor = j << NWD_SHIFT;
3237 disk->fops = &cciss_fops;
3238 		disk->queue = q;
3239 disk->private_data = drv;
3240 /* we must register the controller even if no disks exist */
3241 /* this is for the online array utilities */
3242 if(!drv->heads && j)
3243 continue;
3244 		blk_queue_hardsect_size(q, drv->block_size);
3245 set_capacity(disk, drv->nr_blocks);
3246 add_disk(disk);
3247 }
3248
3249 return(1);
3250
3251clean4:
3da8b713 3252#ifdef CONFIG_CISS_SCSI_TAPE
1acc0b0b 3253 kfree(hba[i]->scsi_rejects.complete);
3da8b713 3254#endif
6044ec88 3255 kfree(hba[i]->cmd_pool_bits);
1da177e4
LT
3256 if(hba[i]->cmd_pool)
3257 pci_free_consistent(hba[i]->pdev,
3258 NR_CMDS * sizeof(CommandList_struct),
3259 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3260 if(hba[i]->errinfo_pool)
3261 pci_free_consistent(hba[i]->pdev,
3262 NR_CMDS * sizeof( ErrorInfo_struct),
3263 hba[i]->errinfo_pool,
3264 hba[i]->errinfo_pool_dhandle);
3265 	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3266clean2:
3267 unregister_blkdev(hba[i]->major, hba[i]->devname);
3268clean1:
3269 release_io_mem(hba[i]);
3270 	hba[i]->busy_initializing = 0;
3271 	free_hba(i);
3272 return(-1);
3273}
3274
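/*
 * PCI removal path: flush the controller's battery-backed cache, free
 * the IRQ and any MSI/MSI-X vectors, unhook the SCSI-tape side, delete
 * the gendisks and their queues, and release the command/error pools
 * and the reserved I/O region.
 */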
3275static void __devexit cciss_remove_one (struct pci_dev *pdev)
3276{
3277 ctlr_info_t *tmp_ptr;
3278 int i, j;
3279 char flush_buf[4];
3280 int return_code;
3281
3282 if (pci_get_drvdata(pdev) == NULL)
3283 {
3284 printk( KERN_ERR "cciss: Unable to remove device \n");
3285 return;
3286 }
3287 tmp_ptr = pci_get_drvdata(pdev);
3288 i = tmp_ptr->ctlr;
3289 if (hba[i] == NULL)
3290 {
3291 printk(KERN_ERR "cciss: device appears to "
3292 "already be removed \n");
3293 return;
3294 }
3295 /* Turn board interrupts off and send the flush cache command */
3296 /* sendcmd will turn off interrupt, and send the flush...
3297 * To write all data in the battery backed cache to disks */
3298 memset(flush_buf, 0, 4);
3299 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3300 TYPE_CMD);
3301 if(return_code != IO_OK)
3302 {
3303 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3304 i);
3305 }
3306 free_irq(hba[i]->intr[2], hba[i]);
3307
3308#ifdef CONFIG_PCI_MSI
3309 if (hba[i]->msix_vector)
3310 pci_disable_msix(hba[i]->pdev);
3311 else if (hba[i]->msi_vector)
3312 pci_disable_msi(hba[i]->pdev);
3313#endif /* CONFIG_PCI_MSI */
3314
3315 pci_set_drvdata(pdev, NULL);
3316 iounmap(hba[i]->vaddr);
3317 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3318 unregister_blkdev(hba[i]->major, hba[i]->devname);
3319 remove_proc_entry(hba[i]->devname, proc_cciss);
3320
3321 /* remove it from the disk list */
3322 for (j = 0; j < NWD; j++) {
3323 struct gendisk *disk = hba[i]->gendisk[j];
3324 		if (disk) {
3325 request_queue_t *q = disk->queue;
3326
3327 if (disk->flags & GENHD_FL_UP)
3328 del_gendisk(disk);
3329 if (q)
3330 blk_cleanup_queue(q);
3331 		}
3332 }
3333
3334 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3335 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3336 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3337 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3338 kfree(hba[i]->cmd_pool_bits);
3339#ifdef CONFIG_CISS_SCSI_TAPE
3340 	kfree(hba[i]->scsi_rejects.complete);
3341#endif
3342 release_io_mem(hba[i]);
3343 free_hba(i);
3344}
3345
3346static struct pci_driver cciss_pci_driver = {
3347 .name = "cciss",
3348 .probe = cciss_init_one,
3349 .remove = __devexit_p(cciss_remove_one),
3350 .id_table = cciss_pci_device_id, /* id_table */
3351};
3352
3353/*
3354 * This is it. Register the PCI driver information for the cards we control
3355 * the OS will call our registered routines when it finds one of our cards.
3356 */
3357static int __init cciss_init(void)
3358{
3359 printk(KERN_INFO DRIVER_NAME "\n");
3360
3361 /* Register for our PCI devices */
3362 	return pci_register_driver(&cciss_pci_driver);
3363}
3364
3365static void __exit cciss_cleanup(void)
3366{
3367 int i;
3368
3369 pci_unregister_driver(&cciss_pci_driver);
3370 	/* double check that all controller entries have been removed */
3371 for (i=0; i< MAX_CTLR; i++)
3372 {
3373 if (hba[i] != NULL)
3374 {
3375 printk(KERN_WARNING "cciss: had to remove"
3376 " controller %d\n", i);
3377 cciss_remove_one(hba[i]->pdev);
3378 }
3379 }
3380 remove_proc_entry("cciss", proc_root_driver);
3381}
3382
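/*
 * Called when the controller stops responding: mark it dead, disable
 * the PCI device, and fail every queued and outstanding command with
 * CMD_HARDWARE_ERR.
 */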
3383static void fail_all_cmds(unsigned long ctlr)
3384{
3385 /* If we get here, the board is apparently dead. */
3386 ctlr_info_t *h = hba[ctlr];
3387 CommandList_struct *c;
3388 unsigned long flags;
3389
3390 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3391 h->alive = 0; /* the controller apparently died... */
3392
3393 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3394
3395 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3396
3397 /* move everything off the request queue onto the completed queue */
3398 while( (c = h->reqQ) != NULL ) {
3399 removeQ(&(h->reqQ), c);
3400 h->Qdepth--;
3401 addQ (&(h->cmpQ), c);
3402 }
3403
3404 /* Now, fail everything on the completed queue with a HW error */
3405 while( (c = h->cmpQ) != NULL ) {
3406 removeQ(&h->cmpQ, c);
3407 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3408 if (c->cmd_type == CMD_RWREQ) {
3409 complete_command(h, c, 0);
3410 } else if (c->cmd_type == CMD_IOCTL_PEND)
3411 complete(c->waiting);
3412#ifdef CONFIG_CISS_SCSI_TAPE
3413 else if (c->cmd_type == CMD_SCSI)
3414 complete_scsi_command(c, 0, 0);
3415#endif
3416 }
3417 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3418 return;
3419}
3420
3421module_init(cciss_init);
3422module_exit(cciss_cleanup);